diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..da3ba441 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,7 @@ +# GitHub code owners +# See https://help.github.com/articles/about-codeowners/ +# +# KEEP THIS FILE SORTED. Order is important. Last match takes precedence. + +* @mrunalp @runcom +pkg/storage/** @nalind @runcom @rhatdan diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 00000000..296f83e6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,58 @@ + + +**Description** + + + +**Steps to reproduce the issue:** +1. +2. +3. + +**Describe the results you received:** + + +**Describe the results you expected:** + + +**Additional information you deem important (e.g. issue happens only occasionally):** + +**Output of `crio --version`:** + +``` +(paste your output here) +``` + +**Additional environment details (AWS, VirtualBox, physical, etc.):** diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..d25e940f --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,23 @@ + + +**- What I did** + +**- How I did it** + +**- How to verify it** + +**- Description for the changelog** + diff --git a/.gitignore b/.gitignore index f9c8e7d9..8217e6ba 100644 --- a/.gitignore +++ b/.gitignore @@ -1,17 +1,18 @@ /.artifacts/ /_output/ -/conmon/conmon /conmon/conmon.o /docs/*.[158] /docs/*.[158].gz -/kpod -/crioctl -/crio /crio.conf *.o *.orig -/pause/pause /pause/pause.o +/bin/ /test/bin2img/bin2img /test/checkseccomp/checkseccomp /test/copyimg/copyimg + +Vagrantfile +.vagrant/ + +.vscode/ diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000..29d8c860 --- /dev/null +++ b/.mailmap @@ -0,0 +1,10 @@ +Aleksa Sarai +Antonio Murdaca +CuiHaozhi +Daniel J Walsh +Haiyan Meng +Lorenzo Fontana +Mrunal Patel +Mrunal Patel +Pengfei Ni +Tobias Klauser diff --git a/.travis.yml b/.travis.yml index 52f91a71..3e1047b2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,7 @@ services: before_install: - sudo apt-get -qq update - sudo apt-get -qq install btrfs-tools libdevmapper-dev libgpgme11-dev libapparmor-dev libseccomp-dev - - sudo apt-get -qq install autoconf automake bison e2fslibs-dev libfuse-dev libtool liblzma-dev + - sudo apt-get -qq install autoconf automake bison e2fslibs-dev libfuse-dev libtool liblzma-dev gettext install: - make install.tools @@ -32,13 +32,22 @@ jobs: - make .gitvalidation - make gofmt - make lint + - make testunit - make docs - make go: 1.8.x - - script: + - stage: Build and Verify + script: - make .gitvalidation - make gofmt - make lint + - make testunit + - make docs + - make + go: 1.9.x + - script: + - make .gitvalidation + - make testunit - make docs - make go: tip diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..cc549116 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,142 @@ +# Contributing to CRI-O + +We'd love to have you join the community! Below summarizes the processes +that we follow. + +## Topics + +* [Reporting Issues](#reporting-issues) +* [Submitting Pull Requests](#submitting-pull-requests) +* [Communications](#communications) +* [Becoming a Maintainer](#becoming-a-maintainer) + +## Reporting Issues + +Before reporting an issue, check our backlog of +[open issues](https://github.com/kubernetes-incubator/cri-o/issues) +to see if someone else has already reported it. If so, feel free to add +your scenario, or additional information, to the discussion. 
Or simply
+"subscribe" to it to be notified when it is updated.
+
+If you find a new issue with the project we'd love to hear about it! The most
+important aspect of a bug report is that it includes enough information for
+us to reproduce it. So, please include as much detail as possible and try
+to remove the extra stuff that doesn't really relate to the issue itself.
+The easier it is for us to reproduce it, the faster it'll be fixed!
+
+Please don't include any private/sensitive information in your issue!
+
+## Submitting Pull Requests
+
+No Pull Request (PR) is too small! Typos, additional comments in the code,
+new test cases, bug fixes, new features, more documentation, ... it's all
+welcome!
+
+While bug fixes can first be identified via an "issue", that is not required.
+It's OK to just open a PR with the fix, but make sure you include the same
+information you would have included in an issue - like how to reproduce it.
+
+PRs for new features should include some background on what use cases the
+new code is trying to address. When possible and when it makes sense, try to
+break up larger PRs into smaller ones - it's easier to review smaller
+code changes. But do this only if those smaller PRs make sense as stand-alone
+changes.
+
+Regardless of the type of PR, all PRs should include:
+* well-documented code changes
+* additional test cases. Ideally, they should fail without your code change applied
+* documentation changes
+
+Squash your commits into logical pieces of work that might need to be reviewed
+separately from the rest of the PR. But squashing down to just one commit is OK
+too, since in the end the entire PR will be reviewed anyway. When in doubt,
+squash.
+
+PRs that fix issues should include a reference like `Closes #XXXX` in the
+commit message so that GitHub will automatically close the referenced issue
+when the PR is merged.
+
+
+### Sign your PRs
+
+The sign-off is a line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith
+
+Use your real name (sorry, no pseudonyms or anonymous contributions).
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
+
+## Communications
+
+For general questions or discussions, please use the
+IRC group on `irc.freenode.net` called `cri-o`
+that has been set up.
+
+For discussions around issues/bugs and features, you can use the GitHub
+[issues](https://github.com/kubernetes-incubator/cri-o/issues)
+and
+[PRs](https://github.com/kubernetes-incubator/cri-o/pulls)
+tracking system.
+
diff --git a/Dockerfile b/Dockerfile
index 7bb06579..1fb5e569 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,6 +12,7 @@ RUN apt-get update && apt-get install -y \
 curl \
 e2fslibs-dev \
 gawk \
+ gettext \
 iptables \
 pkg-config \
 libaio-dev \
@@ -23,6 +24,7 @@ RUN apt-get update && apt-get install -y \
 libseccomp2/jessie-backports \
 libseccomp-dev/jessie-backports \
 libtool \
+ libudev-dev \
 protobuf-c-compiler \
 protobuf-compiler \
 python-minimal \
@@ -34,7 +36,9 @@ RUN apt-get update && apt-get install -y \
 libgpgme11-dev \
 liblzma-dev \
 netcat \
+ socat \
 --no-install-recommends \
+ bsdmainutils \
 && apt-get clean

# install bats
@@ -53,7 +57,7 @@ RUN mkdir -p /usr/src/criu \
 && rm -rf /usr/src/criu

# Install runc
-ENV RUNC_COMMIT 84a082bfef6f932de921437815355186db37aeb1
+ENV RUNC_COMMIT c6e4a1ebeb1a72b529c6f1b6ee2b1ae5b868b14f
RUN set -x \
 && export GOPATH="$(mktemp -d)" \
 && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@@ -61,7 +65,7 @@ RUN set -x \
 && git fetch origin --tags \
 && git checkout -q "$RUNC_COMMIT" \
 && make static BUILDTAGS="seccomp selinux" \
- && cp runc /usr/local/bin/runc \
+ && cp runc /usr/bin/runc \
 && rm -rf "$GOPATH"

# Install CNI plugins
@@ -94,7 +98,7 @@ RUN set -x \
 && rm -rf "$GOPATH"

# Install crictl
-ENV CRICTL_COMMIT 16e6fe4d7199c5689db4630a9330e6a8a12cecd1
+ENV CRICTL_COMMIT b42fc3f364dd48f649d55926c34492beeb9b2e99
RUN set -x \
 && export GOPATH="$(mktemp -d)" \
 && git clone https://github.com/kubernetes-incubator/cri-tools.git "$GOPATH/src/github.com/kubernetes-incubator/cri-tools" \
@@ -107,11 +111,8 @@ RUN set -x \

# Make sure we have some policy for pulling images
RUN mkdir -p /etc/containers
COPY test/policy.json /etc/containers/policy.json
+COPY test/redhat_sigstore.yaml /etc/containers/registries.d/registry.access.redhat.com.yaml

WORKDIR /go/src/github.com/kubernetes-incubator/cri-o
ADD .
/go/src/github.com/kubernetes-incubator/cri-o - -RUN make test/copyimg/copyimg \ - && mkdir -p .artifacts/redis-image \ - && ./test/copyimg/copyimg --import-from=docker://redis --export-to=dir:.artifacts/redis-image --signature-policy ./test/policy.json diff --git a/Makefile b/Makefile index 8c21e145..7cc1a4c1 100644 --- a/Makefile +++ b/Makefile @@ -11,15 +11,19 @@ LIBEXECDIR ?= ${PREFIX}/libexec MANDIR ?= ${PREFIX}/share/man ETCDIR ?= ${DESTDIR}/etc ETCDIR_CRIO ?= ${ETCDIR}/crio -BUILDTAGS ?= selinux seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_tag.sh) +BUILDTAGS ?= seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_installed.sh) $(shell hack/libdm_no_deferred_remove_tag.sh) $(shell hack/btrfs_installed_tag.sh) $(shell hack/ostree_tag.sh) $(shell hack/selinux_tag.sh) +CRICTL_CONFIG_DIR=${DESTDIR}/etc + BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions -SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z) +OCIUMOUNTINSTALLDIR=$(PREFIX)/share/oci-umount/oci-umount.d -GIT_COMMIT := $(shell git rev-parse --short HEAD) +SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z) +PACKAGES ?= $(shell go list -tags "${BUILDTAGS}" ./... | grep -v github.com/kubernetes-incubator/cri-o/vendor) + +COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true) +GIT_COMMIT := $(if $(shell git status --porcelain --untracked-files=no),"${COMMIT_NO}-dirty","${COMMIT_NO}") BUILD_INFO := $(shell date +%s) -VERSION := ${shell cat ./VERSION} - # If GOPATH not specified, use one in the local directory ifeq ($(GOPATH),) export GOPATH := $(CURDIR)/_output @@ -30,8 +34,9 @@ GOPKGBASEDIR := $(shell dirname "$(GOPKGDIR)") # Update VPATH so make finds .gopathok VPATH := $(VPATH):$(GOPATH) - -LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} -X main.buildInfo=${BUILD_INFO} -X main.version=${VERSION}' +SHRINKFLAGS := -s -w +BASE_LDFLAGS := ${SHRINKFLAGS} -X main.gitCommit=${GIT_COMMIT} -X main.buildInfo=${BUILD_INFO} +LDFLAGS := -ldflags '${BASE_LDFLAGS}' all: binaries crio.conf docs @@ -41,7 +46,7 @@ help: @echo "Usage: make " @echo @echo " * 'install' - Install binaries to system locations" - @echo " * 'binaries' - Build crio, conmon and crioctl" + @echo " * 'binaries' - Build crio, conmon and pause" @echo " * 'integration' - Execute integration tests" @echo " * 'clean' - Clean artifacts" @echo " * 'lint' - Execute the source code linter" @@ -59,7 +64,8 @@ lint: .gopathok @./.tool/lint gofmt: - @./hack/verify-gofmt.sh + find . -name '*.go' ! 
-path './vendor/*' -exec gofmt -s -w {} \+ + git diff --exit-code conmon: $(MAKE) -C $@ @@ -68,36 +74,30 @@ pause: $(MAKE) -C $@ test/bin2img/bin2img: .gopathok $(wildcard test/bin2img/*.go) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/test/bin2img + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/bin2img test/copyimg/copyimg: .gopathok $(wildcard test/copyimg/*.go) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/test/copyimg + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/copyimg test/checkseccomp/checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/test/checkseccomp + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/checkseccomp crio: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/crio $(PROJECT)) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/cmd/crio - -crioctl: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/crioctl $(PROJECT)) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/cmd/crioctl - -kpod: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/kpod $(PROJECT)) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/cmd/kpod + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o bin/$@ $(PROJECT)/cmd/crio crio.conf: crio - ./crio --config="" config --default > crio.conf + ./bin/crio --config="" config --default > crio.conf clean: ifneq ($(GOPATH),) rm -f "$(GOPATH)/.gopathok" endif rm -rf _output - rm -f docs/*.1 docs/*.5 docs/*.8 + rm -f docs/*.5 docs/*.8 rm -fr test/testdata/redis-image find . -name \*~ -delete find . -name \#\* -delete - rm -f crioctl crio kpod + rm -f bin/crio make -C conmon clean make -C pause clean rm -f test/bin2img/bin2img @@ -108,22 +108,23 @@ crioimage: docker build -t ${CRIO_IMAGE} . 
dbuild: crioimage - docker run --name=${CRIO_INSTANCE} --privileged ${CRIO_IMAGE} -v ${PWD}:/go/src/${PROJECT} --rm make binaries + docker run --name=${CRIO_INSTANCE} -e BUILDTAGS --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${CRIO_IMAGE} make binaries integration: crioimage - docker run -e TESTFLAGS -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${CRIO_IMAGE} make localintegration + docker run -e STORAGE_OPTIONS="--storage-driver=vfs" -e TESTFLAGS -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${CRIO_IMAGE} make localintegration -localintegration: clean binaries +testunit: + $(GO) test -tags "$(BUILDTAGS)" -cover $(PACKAGES) + +localintegration: clean binaries test-binaries ./test/test_runner.sh ${TESTFLAGS} -binaries: crio crioctl kpod conmon pause test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp +binaries: crio conmon pause +test-binaries: test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp MANPAGES_MD := $(wildcard docs/*.md) MANPAGES := $(MANPAGES_MD:%.md=%) -docs/%.1: docs/%.1.md .gopathok - (go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@) || ($(GOPATH)/bin/go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@) - docs/%.5: docs/%.5.md .gopathok (go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@) || ($(GOPATH)/bin/go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@) @@ -132,26 +133,27 @@ docs/%.8: docs/%.8.md .gopathok docs: $(MANPAGES) -install: .gopathok - install ${SELINUXOPT} -D -m 755 crio $(BINDIR)/crio - install ${SELINUXOPT} -D -m 755 crioctl $(BINDIR)/crioctl - install ${SELINUXOPT} -D -m 755 kpod $(BINDIR)/kpod - install ${SELINUXOPT} -D -m 755 conmon/conmon $(LIBEXECDIR)/crio/conmon - install ${SELINUXOPT} -D -m 755 pause/pause $(LIBEXECDIR)/crio/pause - install ${SELINUXOPT} -d -m 755 $(MANDIR)/man1 +install: .gopathok install.bin install.man + +install.bin: + install ${SELINUXOPT} -D -m 755 bin/crio $(BINDIR)/crio + install ${SELINUXOPT} -D -m 755 bin/conmon $(LIBEXECDIR)/crio/conmon + install ${SELINUXOPT} -D -m 755 bin/pause $(LIBEXECDIR)/crio/pause + +install.man: install ${SELINUXOPT} -d -m 755 $(MANDIR)/man5 install ${SELINUXOPT} -d -m 755 $(MANDIR)/man8 - install ${SELINUXOPT} -m 644 $(filter %.1,$(MANPAGES)) -t $(MANDIR)/man1 install ${SELINUXOPT} -m 644 $(filter %.5,$(MANPAGES)) -t $(MANDIR)/man5 install ${SELINUXOPT} -m 644 $(filter %.8,$(MANPAGES)) -t $(MANDIR)/man8 -install.config: +install.config: crio.conf install ${SELINUXOPT} -D -m 644 crio.conf $(ETCDIR_CRIO)/crio.conf install ${SELINUXOPT} -D -m 644 seccomp.json $(ETCDIR_CRIO)/seccomp.json + install ${SELINUXOPT} -D -m 644 crio-umount.conf $(OCIUMOUNTINSTALLDIR)/crio-umount.conf + install ${SELINUXOPT} -D -m 644 crictl.yaml $(CRICTL_CONFIG_DIR) install.completions: install ${SELINUXOPT} -d -m 755 ${BASHINSTALLDIR} - install ${SELINUXOPT} -m 644 -D completions/bash/kpod ${BASHINSTALLDIR} install.systemd: install ${SELINUXOPT} -D -m 644 contrib/systemd/crio.service $(PREFIX)/lib/systemd/system/crio.service @@ -160,7 +162,6 @@ install.systemd: uninstall: rm -f $(BINDIR)/crio - rm -f $(BINDIR)/crioctl rm -f $(LIBEXECDIR)/crio/conmon rm -f $(LIBEXECDIR)/crio/pause for i in $(filter %.1,$(MANPAGES)); do \ diff --git a/OWNERS b/OWNERS index 7696ec17..6b945bba 100644 --- a/OWNERS +++ b/OWNERS @@ -1,4 +1,4 @@ -assignees: +approvers: - mrunalp - runcom - cyphar diff --git a/README.md b/README.md index 2c40205c..69feaa34 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,32 @@ -![cri-o 
logo](https://cdn.rawgit.com/kubernetes-incubator/cri-o/master/logo/crio-logo.svg) -# cri-o - OCI-based implementation of Kubernetes Container Runtime Interface +![CRI-O logo](https://cdn.rawgit.com/kubernetes-incubator/cri-o/master/logo/crio-logo.svg) +# CRI-O - OCI-based implementation of Kubernetes Container Runtime Interface [![Build Status](https://img.shields.io/travis/kubernetes-incubator/cri-o.svg?maxAge=2592000&style=flat-square)](https://travis-ci.org/kubernetes-incubator/cri-o) [![Go Report Card](https://goreportcard.com/badge/github.com/kubernetes-incubator/cri-o?style=flat-square)](https://goreportcard.com/report/github.com/kubernetes-incubator/cri-o) -### Status: beta +### Status: Stable + +## Compatibility matrix: CRI-O <-> Kubernetes clusters + +| Version - Branch | Kubernetes branch/version | Maintenance status | +|----------------------------|-------------------------------|--------------------| +| CRI-O 1.0.x - release-1.0 | Kubernetes 1.7 branch, v1.7.x | = | +| CRI-O 1.8.x - release-1.8 | Kubernetes 1.8 branch, v1.8.x | = | +| CRI-O 1.9.x - release-1.9 | Kubernetes 1.9 branch, v1.9.x | = | +| CRI-O HEAD - master | Kubernetes master branch | ✓ | + +Key: + +* `✓` Changes in main Kubernetes repo about CRI are actively implemented in CRI-O +* `=` Maintenance is manual, only bugs will be patched. ## What is the scope of this project? -cri-o is meant to provide an integration path between OCI conformant runtimes and the kubelet. +CRI-O is meant to provide an integration path between OCI conformant runtimes and the kubelet. Specifically, it implements the Kubelet [Container Runtime Interface (CRI)](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md) using OCI conformant runtimes. -The scope of cri-o is tied to the scope of the CRI. +The scope of CRI-O is tied to the scope of the CRI. -At a high level, we expect the scope of cri-o to be restricted to the following functionalities: +At a high level, we expect the scope of CRI-O to be restricted to the following functionalities: * Support multiple image formats including the existing Docker image format * Support for multiple means to download images including trust & image verification @@ -24,7 +38,7 @@ At a high level, we expect the scope of cri-o to be restricted to the following ## What is not in scope for this project? * Building, signing and pushing images to various image storages -* A CLI utility for interacting with cri-o. Any CLIs built as part of this project are only meant for testing this project and there will be no guarantees on the backward compatibility with it. +* A CLI utility for interacting with CRI-O. Any CLIs built as part of this project are only meant for testing this project and there will be no guarantees on the backward compatibility with it. This is an implementation of the Kubernetes Container Runtime Interface (CRI) that will allow Kubernetes to directly launch and manage Open Container Initiative (OCI) containers. 
@@ -40,28 +54,8 @@ It is currently in active development in the Kubernetes community through the [d | Command | Description | Demo| | ---------------------------------------------------- | --------------------------------------------------------------------------|-----| | [crio(8)](/docs/crio.8.md) | OCI Kubernetes Container Runtime daemon || -| [kpod(1)](/docs/kpod.1.md) | Simple management tool for pods and images || -| [kpod-cp(1)](/docs/kpod-cp.1.md) | Copy files/folders between a container and the local filesystem || -| [kpod-diff(1)](/docs/kpod-diff.1.md) | Inspect changes on a container or image's filesystem || -| [kpod-export(1)](/docs/kpod-export.1.md) | Export container's filesystem contents as a tar archive || -| [kpod-history(1)](/docs/kpod-history.1.md) | Shows the history of an image |[![...](/docs/play.png)](https://asciinema.org/a/bCvUQJ6DkxInMELZdc5DinNSx)| -| [kpod-images(1)](/docs/kpod-images.1.md) | List images in local storage |[![...](/docs/play.png)](https://asciinema.org/a/133649)| -| [kpod-info(1)](/docs/kpod-info.1.md) | Display system information || -| [kpod-inspect(1)](/docs/kpod-inspect.1.md) | Display the configuration of a container or image |[![...](/docs/play.png)](https://asciinema.org/a/133418)| -| [kpod-load(1)](/docs/kpod-load.1.md) | Load an image from docker archive or oci |[![...](/docs/play.png)](https://asciinema.org/a/kp8kOaexEhEa20P1KLZ3L5X4g)| -| [kpod-logs(1)](/docs/kpod-logs.1.md) | Display the logs of a container || -| [kpod-mount(1)](/docs/kpod-mount.1.md) | Mount a working container's root filesystem || -| [kpod-ps(1)](/docs/kpod-ps.1.md) | Prints out information about containers || -| [kpod-pull(1)](/docs/kpod-pull.1.md) | Pull an image from a registry |[![...](/docs/play.png)](https://asciinema.org/a/lr4zfoynHJOUNu1KaXa1dwG2X)| -| [kpod-push(1)](/docs/kpod-push.1.md) | Push an image to a specified destination |[![...](/docs/play.png)](https://asciinema.org/a/133276)| -| [kpod-rename(1)](/docs/kpod-rename.1.md) | Rename a container || -| [kpod-rm(1)](/docs/kpod-rm.1.md) | Removes one or more containers || -| [kpod-rmi(1)](/docs/kpod-rmi.1.md) | Removes one or more images |[![...](/docs/play.png)](https://asciinema.org/a/133799)| -| [kpod-save(1)](/docs/kpod-save.1.md) | Saves an image to an archive |[![...](/docs/play.png)](https://asciinema.org/a/kp8kOaexEhEa20P1KLZ3L5X4g)| -| [kpod-stats(1)](/docs/kpod-stats.1.md) | Display a live stream of one or more containers' resource usage statistics|| -| [kpod-tag(1)](/docs/kpod-tag.1.md) | Add an additional name to a local image |[![...](/docs/play.png)](https://asciinema.org/a/133803)| -| [kpod-umount(1)](/docs/kpod-umount.1.md) | Unmount a working container's root filesystem || -| [kpod-version(1)](/docs/kpod-version.1.md) | Display the version information |[![...](/docs/play.png)](https://asciinema.org/a/mfrn61pjZT9Fc8L4NbfdSqfgu)| + +Note that kpod and its container management and debugging commands have moved to a separate repository, located [here](https://github.com/projectatomic/libpod). 
## Configuration | File | Description | @@ -72,23 +66,28 @@ It is currently in active development in the Kubernetes community through the [d [CRI-O configures OCI Hooks to run when launching a container](./hooks.md) -## cri-o Usage Transfer +## CRI-O Usage Transfer -[Useful information for ops and dev transfer as it relates to infrastructure that utilizes cri-o](/transfer.md) +[Useful information for ops and dev transfer as it relates to infrastructure that utilizes CRI-O](/transfer.md) ## Communication For async communication and long running discussions please use issues and pull requests on the github repo. This will be the best place to discuss design and implementation. -For sync communication we have an IRC channel #cri-o, on chat.freenode.net, that everyone is welcome to join and chat about development. +For sync communication we have an IRC channel #CRI-O, on chat.freenode.net, that everyone is welcome to join and chat about development. ## Getting started -### Prerequisites +### Runtime dependencies -Latest version of `runc` is expected to be installed on the system. It is picked up as the default runtime by crio. +- runc, Clear Containers runtime, or any other OCI compatible runtime +- socat +- iproute +- iptables -### Build Dependencies +Latest version of `runc` is expected to be installed on the system. It is picked up as the default runtime by CRI-O. + +### Build and Run Dependencies **Required** @@ -98,9 +97,12 @@ Fedora, CentOS, RHEL, and related distributions: yum install -y \ btrfs-progs-devel \ device-mapper-devel \ + git \ glib2-devel \ glibc-devel \ glibc-static \ + go \ + golang-github-cpuguy83-go-md2man \ gpgme-devel \ libassuan-devel \ libgpg-error-devel \ @@ -108,7 +110,8 @@ yum install -y \ libselinux-devel \ ostree-devel \ pkgconfig \ - runc + runc \ + skopeo-containers ``` Debian, Ubuntu, and related distributions: @@ -116,6 +119,8 @@ Debian, Ubuntu, and related distributions: ```bash apt-get install -y \ btrfs-tools \ + git \ + golang-go \ libassuan-dev \ libdevmapper-dev \ libglib2.0-dev \ @@ -125,13 +130,19 @@ apt-get install -y \ libseccomp-dev \ libselinux1-dev \ pkg-config \ - runc + go-md2man \ + runc \ + skopeo-containers ``` Debian, Ubuntu, and related distributions will also need a copy of the development libraries for `ostree`, either in the form of the `libostree-dev` package from the [flatpak](https://launchpad.net/~alexlarsson/+archive/ubuntu/flatpak) PPA, or built [from source](https://github.com/ostreedev/ostree) (more on that [here](https://ostree.readthedocs.io/en/latest/#building)). If using an older release or a long-term support release, be careful to double-check that the version of `runc` is new enough (running `runc --version` should produce `spec: 1.0.0`), or else build your own. +**NOTE** + +Be careful to double-check that the version of golang is new enough, version 1.8.x or higher is required. If needed, golang kits are avaliable at https://golang.org/dl/ + **Optional** Fedora, CentOS, RHEL, and related distributions: @@ -147,7 +158,7 @@ apt-get install -y \ ### Get Source Code -As with other Go projects, cri-o must be cloned into a directory structure like: +As with other Go projects, CRI-O must be cloned into a directory structure like: ``` GOPATH @@ -181,7 +192,7 @@ make sudo make install ``` -Otherwise, if you do not want to build `cri-o` with seccomp support you can add `BUILDTAGS=""` when running make. +Otherwise, if you do not want to build `CRI-O` with seccomp support you can add `BUILDTAGS=""` when running make. 
```bash
make BUILDTAGS=""
sudo make install
```

@@ -190,7 +201,7 @@ sudo make install

#### Build Tags

-`cri-o` supports optional build tags for compiling support of various features.
+`CRI-O` supports optional build tags for compiling support of various features.
To add build tags to the make option the `BUILDTAGS` variable must be set.

```bash
@@ -216,14 +227,15 @@ your system.

### Running with kubernetes

-You can run a local version of kubernetes with cri-o using `local-up-cluster.sh`:
+You can run a local version of kubernetes with CRI-O using `local-up-cluster.sh`:

1. Clone the [kubernetes repository](https://github.com/kubernetes/kubernetes)
-1. Start the cri-o daemon (`crio`)
+1. Start the CRI-O daemon (`crio`)
1. From the kubernetes project directory, run:

```shell
+CGROUP_DRIVER=systemd \
CONTAINER_RUNTIME=remote \
-CONTAINER_RUNTIME_ENDPOINT='/var/run/crio.sock --runtime-request-timeout=15m' \
+CONTAINER_RUNTIME_ENDPOINT='/var/run/crio/crio.sock --runtime-request-timeout=15m' \
./hack/local-up-cluster.sh
```

@@ -237,5 +249,4 @@ To run a full cluster, see [the instructions](kubernetes.md).
1. Support for log management, networking integration using CNI, pluggable image/storage management (done)
1. Support for exec/attach (done)
1. Target fully automated kubernetes testing without failures [e2e status](https://github.com/kubernetes-incubator/cri-o/issues/533)
-1. Release 1.0
1. Track upstream k8s releases
diff --git a/VERSION b/VERSION
deleted file mode 100644
index 867bf6ba..00000000
--- a/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-1.0.0-rc1
diff --git a/client/client.go b/client/client.go
new file mode 100644
index 00000000..ad717b97
--- /dev/null
+++ b/client/client.go
@@ -0,0 +1,103 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+	"syscall"
+	"time"
+
+	"github.com/kubernetes-incubator/cri-o/types"
+)
+
+const (
+	maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
+)
+
+// CrioClient is an interface to get information from the crio daemon endpoint.
+type CrioClient interface {
+	DaemonInfo() (types.CrioInfo, error)
+	ContainerInfo(string) (*types.ContainerInfo, error)
+}
+
+type crioClientImpl struct {
+	client         *http.Client
+	crioSocketPath string
+}
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+	if len(addr) > maxUnixSocketPathSize {
+		return fmt.Errorf("Unix socket path %q is too long", addr)
+	}
+	// No need for compression in local communications.
+	tr.DisableCompression = true
+	tr.Dial = func(_, _ string) (net.Conn, error) {
+		return net.DialTimeout(proto, addr, 32*time.Second)
+	}
+	return nil
+}
+
+// New returns a crio client.
+func New(crioSocketPath string) (CrioClient, error) {
+	tr := new(http.Transport)
+	if err := configureUnixTransport(tr, "unix", crioSocketPath); err != nil {
+		return nil, err
+	}
+	c := &http.Client{
+		Transport: tr,
+	}
+	return &crioClientImpl{
+		client:         c,
+		crioSocketPath: crioSocketPath,
+	}, nil
+}
+
+func (c *crioClientImpl) getRequest(path string) (*http.Request, error) {
+	req, err := http.NewRequest("GET", path, nil)
+	if err != nil {
+		return nil, err
+	}
+	// For local communications over a unix socket, it doesn't matter what
+	// the host is. We just need a valid and meaningful host name.
+	req.Host = "crio"
+	req.URL.Host = c.crioSocketPath
+	req.URL.Scheme = "http"
+	return req, nil
+}
+
+// DaemonInfo returns cri-o daemon info from the cri-o
+// info endpoint.
+func (c *crioClientImpl) DaemonInfo() (types.CrioInfo, error) { + info := types.CrioInfo{} + req, err := c.getRequest("/info") + if err != nil { + return info, err + } + resp, err := c.client.Do(req) + if err != nil { + return info, err + } + defer resp.Body.Close() + if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { + return info, err + } + return info, nil +} + +// ContainerInfo returns container info by querying +// the cri-o container endpoint. +func (c *crioClientImpl) ContainerInfo(id string) (*types.ContainerInfo, error) { + req, err := c.getRequest("/containers/" + id) + if err != nil { + return nil, err + } + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + cInfo := types.ContainerInfo{} + if err := json.NewDecoder(resp.Body).Decode(&cInfo); err != nil { + return nil, err + } + return &cInfo, nil +} diff --git a/cmd/crio/config.go b/cmd/crio/config.go index 17ec67ca..2564baf1 100644 --- a/cmd/crio/config.go +++ b/cmd/crio/config.go @@ -28,8 +28,7 @@ storage_driver = "{{ .Storage }}" storage_option = [ {{ range $opt := .StorageOptions }}{{ printf "\t%q,\n" $opt }}{{ end }}] -# The "crio.api" table contains settings for the kubelet/gRPC -# interface (which is also used by crioctl). +# The "crio.api" table contains settings for the kubelet/gRPC interface. [crio.api] # listen is the path to the AF_LOCAL socket on which crio will listen. @@ -77,6 +76,9 @@ runtime_untrusted_workload = "{{ .RuntimeUntrustedWorkload }}" # container runtime for all containers. default_workload_trust = "{{ .DefaultWorkloadTrust }}" +# no_pivot instructs the runtime to not use pivot_root, but instead use MS_MOVE +no_pivot = {{ .NoPivot }} + # conmon is the path to conmon binary, used for managing the runtime. conmon = "{{ .Conmon }}" @@ -105,9 +107,20 @@ cgroup_manager = "{{ .CgroupManager }}" # hooks_dir_path is the oci hooks directory for automatically executed hooks hooks_dir_path = "{{ .HooksDirPath }}" +# default_mounts is the mounts list to be mounted for the container when created +default_mounts = [ +{{ range $mount := .DefaultMounts }}{{ printf "\t%q, \n" $mount }}{{ end }}] + # pids_limit is the number of processes allowed in a container pids_limit = {{ .PidsLimit }} +# enable using a shared PID namespace for containers in a pod +enable_shared_pid_namespace = {{ .EnableSharedPIDNamespace }} + +# log_size_max is the max limit for the container log size in bytes. +# Negative values indicate that no limit is imposed. +log_size_max = {{ .LogSizeMax }} + # The "crio.image" table contains settings pertaining to the # management of OCI images. [crio.image] diff --git a/cmd/crio/main.go b/cmd/crio/main.go index cebbd657..a058f296 100644 --- a/cmd/crio/main.go +++ b/cmd/crio/main.go @@ -8,12 +8,15 @@ import ( _ "net/http/pprof" "os" "os/signal" + "path/filepath" "sort" "strings" + "time" "github.com/containers/storage/pkg/reexec" - "github.com/kubernetes-incubator/cri-o/libkpod" + "github.com/kubernetes-incubator/cri-o/lib" "github.com/kubernetes-incubator/cri-o/server" + "github.com/kubernetes-incubator/cri-o/version" "github.com/opencontainers/selinux/go-selinux" "github.com/sirupsen/logrus" "github.com/soheilhy/cmux" @@ -23,23 +26,24 @@ import ( "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) -// This is populated by the Makefile from the VERSION file -// in the repository -var version = "" - // gitCommit is the commit that the binary is being built from. // It will be populated by the Makefile. 
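For illustration, here is a minimal usage sketch (not part of this PR) for the new `client` package above. The socket path mirrors the default used elsewhere in this changeset, the container ID is a placeholder, and since the fields of `types.CrioInfo` and `types.ContainerInfo` are not shown in this diff, the structs are simply printed whole:

```go
package main

import (
	"fmt"
	"log"

	"github.com/kubernetes-incubator/cri-o/client"
)

func main() {
	// Assumed default socket path; pass whatever your crio --listen points at.
	c, err := client.New("/var/run/crio/crio.sock")
	if err != nil {
		log.Fatal(err)
	}

	// DaemonInfo queries the daemon's /info endpoint.
	info, err := c.DaemonInfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("daemon info: %+v\n", info)

	// ContainerInfo queries /containers/<id>; the ID below is a placeholder.
	cinfo, err := c.ContainerInfo("0123456789abcdef")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("container info: %+v\n", cinfo)
}
```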
var gitCommit = "" func validateConfig(config *server.Config) error { switch config.ImageVolumes { - case libkpod.ImageVolumesMkdir: - case libkpod.ImageVolumesIgnore: - case libkpod.ImageVolumesBind: + case lib.ImageVolumesMkdir: + case lib.ImageVolumesIgnore: + case lib.ImageVolumesBind: default: return fmt.Errorf("Unrecognized image volume type specified") } + + // This needs to match the read buffer size in conmon + if config.LogSizeMax >= 0 && config.LogSizeMax < 8192 { + return fmt.Errorf("log size max should be negative or >= 8192") + } return nil } @@ -122,9 +126,18 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error { if ctx.GlobalIsSet("hooks-dir-path") { config.HooksDirPath = ctx.GlobalString("hooks-dir-path") } + if ctx.GlobalIsSet("default-mounts") { + config.DefaultMounts = ctx.GlobalStringSlice("default-mounts") + } if ctx.GlobalIsSet("pids-limit") { config.PidsLimit = ctx.GlobalInt64("pids-limit") } + if ctx.GlobalIsSet("enable-shared-pid-namespace") { + config.EnableSharedPIDNamespace = ctx.GlobalBool("enable-shared-pid-namespace") + } + if ctx.GlobalIsSet("log-size-max") { + config.LogSizeMax = ctx.GlobalInt64("log-size-max") + } if ctx.GlobalIsSet("cni-config-dir") { config.NetworkDir = ctx.GlobalString("cni-config-dir") } @@ -132,7 +145,7 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error { config.PluginDir = ctx.GlobalString("cni-plugin-dir") } if ctx.GlobalIsSet("image-volumes") { - config.ImageVolumes = libkpod.ImageVolumesType(ctx.GlobalString("image-volumes")) + config.ImageVolumes = lib.ImageVolumesType(ctx.GlobalString("image-volumes")) } return nil } @@ -153,8 +166,7 @@ func catchShutdown(gserver *grpc.Server, sserver *server.Server, hserver *http.S *signalled = true gserver.GracefulStop() hserver.Shutdown(context.Background()) - // TODO(runcom): enable this after https://github.com/kubernetes/kubernetes/pull/51377 - //sserver.StopStreamServer() + sserver.StopStreamServer() sserver.StopExitMonitor() if err := sserver.Shutdown(); err != nil { logrus.Warnf("error shutting down main service %v", err) @@ -171,9 +183,7 @@ func main() { app := cli.NewApp() var v []string - if version != "" { - v = append(v, version) - } + v = append(v, version.Version) if gitCommit != "" { v = append(v, fmt.Sprintf("commit: %s", gitCommit)) } @@ -194,10 +204,6 @@ func main() { Name: "conmon", Usage: "path to the conmon executable", }, - cli.BoolFlag{ - Name: "debug", - Usage: "enable debug output for logging", - }, cli.StringFlag{ Name: "listen", Usage: "path to crio socket", @@ -220,6 +226,11 @@ func main() { Value: "text", Usage: "set the format used by logs ('text' (default), or 'json')", }, + cli.StringFlag{ + Name: "log-level", + Usage: "log messages above specified level: debug, info (default), warn, error, fatal or panic", + }, + cli.StringFlag{ Name: "pause-command", Usage: "name of the pause command in the pause image", @@ -286,9 +297,18 @@ func main() { }, cli.Int64Flag{ Name: "pids-limit", - Value: libkpod.DefaultPidsLimit, + Value: lib.DefaultPidsLimit, Usage: "maximum number of processes allowed in a container", }, + cli.BoolFlag{ + Name: "enable-shared-pid-namespace", + Usage: "enable using a shared PID namespace for containers in a pod", + }, + cli.Int64Flag{ + Name: "log-size-max", + Value: lib.DefaultLogSizeMax, + Usage: "maximum log size in bytes for a container", + }, cli.StringFlag{ Name: "cni-config-dir", Usage: "CNI configuration files directory", @@ -299,13 +319,18 @@ func main() { }, cli.StringFlag{ Name: "image-volumes", - Value: 
string(libkpod.ImageVolumesMkdir), - Usage: "image volume handling ('mkdir' or 'ignore')", + Value: string(lib.ImageVolumesMkdir), + Usage: "image volume handling ('mkdir', 'bind', or 'ignore')", }, cli.StringFlag{ Name: "hooks-dir-path", Usage: "set the OCI hooks directory path", - Value: libkpod.DefaultHooksDirPath, + Value: lib.DefaultHooksDirPath, + Hidden: true, + }, + cli.StringSliceFlag{ + Name: "default-mounts", + Usage: "add one or more default mount paths in the form host:container", Hidden: true, }, cli.BoolFlag{ @@ -353,8 +378,13 @@ func main() { logrus.SetFormatter(cf) - if c.GlobalBool("debug") { - logrus.SetLevel(logrus.DebugLevel) + if loglevel := c.GlobalString("log-level"); loglevel != "" { + level, err := logrus.ParseLevel(loglevel) + if err != nil { + return err + } + + logrus.SetLevel(level) } if path := c.GlobalString("log"); path != "" { @@ -386,6 +416,16 @@ func main() { }() } + args := c.Args() + if len(args) > 0 { + for _, command := range app.Commands { + if args[0] == command.Name { + break + } + } + return fmt.Errorf("command %q not supported", args[0]) + } + config := c.App.Metadata["config"].(*server.Config) if !config.SELinux { @@ -397,6 +437,10 @@ func main() { return fmt.Errorf("invalid --runtime value %q", err) } + if err := os.MkdirAll(filepath.Dir(config.Listen), 0755); err != nil { + return err + } + // Remove the socket if it already exists if _, err := os.Stat(config.Listen); err == nil { if err := os.Remove(config.Listen); err != nil { @@ -448,7 +492,8 @@ func main() { infoMux := service.GetInfoMux() srv := &http.Server{ - Handler: infoMux, + Handler: infoMux, + ReadTimeout: 5 * time.Second, } graceful := false @@ -464,26 +509,23 @@ func main() { if graceful && strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") { err = nil } else { - logrus.Errorf("Failed to serve grpc grpc request: %v", err) + logrus.Errorf("Failed to serve grpc request: %v", err) } } }() - // TODO(runcom): enable this after https://github.com/kubernetes/kubernetes/pull/51377 - //streamServerCloseCh := service.StreamingServerCloseChan() + streamServerCloseCh := service.StreamingServerCloseChan() serverExitMonitorCh := service.ExitMonitorCloseChan() select { - // TODO(runcom): enable this after https://github.com/kubernetes/kubernetes/pull/51377 - //case <-streamServerCloseCh: + case <-streamServerCloseCh: case <-serverExitMonitorCh: case <-serverCloseCh: } service.Shutdown() - // TODO(runcom): enable this after https://github.com/kubernetes/kubernetes/pull/51377 - //<-streamServerCloseCh - //logrus.Debug("closed stream server") + <-streamServerCloseCh + logrus.Debug("closed stream server") <-serverExitMonitorCh logrus.Debug("closed exit monitor") <-serverCloseCh diff --git a/cmd/crioctl/container.go b/cmd/crioctl/container.go deleted file mode 100644 index e02ce9f8..00000000 --- a/cmd/crioctl/container.go +++ /dev/null @@ -1,619 +0,0 @@ -package main - -import ( - "fmt" - "log" - "net/url" - "os" - "strings" - "time" - - "github.com/urfave/cli" - "golang.org/x/net/context" - remocommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/remotecommand" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" -) - -var containerCommand = cli.Command{ - Name: "container", - Aliases: []string{"ctr"}, - Subcommands: []cli.Command{ - createContainerCommand, - startContainerCommand, - stopContainerCommand, - removeContainerCommand, - containerStatusCommand, - listContainersCommand, - 
execSyncCommand, - execCommand, - }, -} - -type createOptions struct { - // configPath is path to the config for container - configPath string - // name sets the container name - name string - // podID of the container - podID string - // labels for the container - labels map[string]string -} - -var createContainerCommand = cli.Command{ - Name: "create", - Usage: "create a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "pod", - Usage: "the id of the pod sandbox to which the container belongs", - }, - cli.StringFlag{ - Name: "config", - Value: "config.json", - Usage: "the path of a container config file", - }, - cli.StringFlag{ - Name: "name", - Value: "", - Usage: "the name of the container", - }, - cli.StringSliceFlag{ - Name: "label", - Usage: "add key=value labels to the container", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - if !context.IsSet("pod") { - return fmt.Errorf("Please specify the id of the pod sandbox to which the container belongs via the --pod option") - } - - opts := createOptions{ - configPath: context.String("config"), - name: context.String("name"), - podID: context.String("pod"), - labels: make(map[string]string), - } - - for _, l := range context.StringSlice("label") { - pair := strings.Split(l, "=") - if len(pair) != 2 { - return fmt.Errorf("incorrectly specified label: %v", l) - } - opts.labels[pair[0]] = pair[1] - } - - // Test RuntimeServiceClient.CreateContainer - err = CreateContainer(client, opts) - if err != nil { - return fmt.Errorf("Creating container failed: %v", err) - } - return nil - }, -} - -var startContainerCommand = cli.Command{ - Name: "start", - Usage: "start a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = StartContainer(client, context.String("id")) - if err != nil { - return fmt.Errorf("Starting the container failed: %v", err) - } - return nil - }, -} - -var stopContainerCommand = cli.Command{ - Name: "stop", - Usage: "stop a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - cli.Int64Flag{ - Name: "timeout", - Value: 10, - Usage: "seconds to wait to kill the container after a graceful stop is requested", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = StopContainer(client, context.String("id"), context.Int64("timeout")) - if err != nil { - return fmt.Errorf("Stopping the container failed: %v", err) - } - return nil - }, -} - -var removeContainerCommand = cli.Command{ - Name: "remove", - Usage: "remove a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. 
- conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = RemoveContainer(client, context.String("id")) - if err != nil { - return fmt.Errorf("Removing the container failed: %v", err) - } - return nil - }, -} - -var containerStatusCommand = cli.Command{ - Name: "status", - Usage: "get the status of a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = ContainerStatus(client, context.String("id")) - if err != nil { - return fmt.Errorf("Getting the status of the container failed: %v", err) - } - return nil - }, -} - -var execSyncCommand = cli.Command{ - Name: "execsync", - Usage: "exec a command synchronously in a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - cli.Int64Flag{ - Name: "timeout", - Value: 0, - Usage: "timeout for the command", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = ExecSync(client, context.String("id"), context.Args(), context.Int64("timeout")) - if err != nil { - return fmt.Errorf("execing command in container failed: %v", err) - } - return nil - }, -} - -var execCommand = cli.Command{ - Name: "exec", - Usage: "prepare a streaming endpoint to execute a command in the container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - cli.BoolFlag{ - Name: "tty", - Usage: "whether to use tty", - }, - cli.BoolFlag{ - Name: "stdin", - Usage: "whether to stream to stdin", - }, - cli.BoolFlag{ - Name: "url", - Usage: "do not exec command, just prepare streaming endpoint", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. 
- conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = Exec(client, context.String("id"), context.Bool("tty"), context.Bool("stdin"), context.Bool("url"), context.Args()) - if err != nil { - return fmt.Errorf("execing command in container failed: %v", err) - } - return nil - }, -} - -type listOptions struct { - // id of the container - id string - // podID of the container - podID string - // state of the container - state string - // quiet is for listing just container IDs - quiet bool - // labels are selectors for the container - labels map[string]string -} - -var listContainersCommand = cli.Command{ - Name: "list", - Usage: "list containers", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "quiet", - Usage: "list only container IDs", - }, - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "filter by container id", - }, - cli.StringFlag{ - Name: "pod", - Value: "", - Usage: "filter by container pod id", - }, - cli.StringFlag{ - Name: "state", - Value: "", - Usage: "filter by container state", - }, - cli.StringSliceFlag{ - Name: "label", - Usage: "filter by key=value label", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - opts := listOptions{ - id: context.String("id"), - podID: context.String("pod"), - state: context.String("state"), - quiet: context.Bool("quiet"), - labels: make(map[string]string), - } - - for _, l := range context.StringSlice("label") { - pair := strings.Split(l, "=") - if len(pair) != 2 { - return fmt.Errorf("incorrectly specified label: %v", l) - } - opts.labels[pair[0]] = pair[1] - } - - err = ListContainers(client, opts) - if err != nil { - return fmt.Errorf("listing containers failed: %v", err) - } - return nil - }, -} - -// CreateContainer sends a CreateContainerRequest to the server, and parses -// the returned CreateContainerResponse. -func CreateContainer(client pb.RuntimeServiceClient, opts createOptions) error { - config, err := loadContainerConfig(opts.configPath) - if err != nil { - return err - } - - // Override the name by the one specified through CLI - if opts.name != "" { - config.Metadata.Name = opts.name - } - - for k, v := range opts.labels { - config.Labels[k] = v - } - - r, err := client.CreateContainer(context.Background(), &pb.CreateContainerRequest{ - PodSandboxId: opts.podID, - Config: config, - // TODO(runcom): this is missing PodSandboxConfig!!! - // we should/could find a way to retrieve it from the fs and set it here - }) - if err != nil { - return err - } - fmt.Println(r.ContainerId) - return nil -} - -// StartContainer sends a StartContainerRequest to the server, and parses -// the returned StartContainerResponse. -func StartContainer(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - _, err := client.StartContainer(context.Background(), &pb.StartContainerRequest{ - ContainerId: ID, - }) - if err != nil { - return err - } - fmt.Println(ID) - return nil -} - -// StopContainer sends a StopContainerRequest to the server, and parses -// the returned StopContainerResponse. 
-func StopContainer(client pb.RuntimeServiceClient, ID string, timeout int64) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - _, err := client.StopContainer(context.Background(), &pb.StopContainerRequest{ - ContainerId: ID, - Timeout: timeout, - }) - if err != nil { - return err - } - fmt.Println(ID) - return nil -} - -// RemoveContainer sends a RemoveContainerRequest to the server, and parses -// the returned RemoveContainerResponse. -func RemoveContainer(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - _, err := client.RemoveContainer(context.Background(), &pb.RemoveContainerRequest{ - ContainerId: ID, - }) - if err != nil { - return err - } - fmt.Println(ID) - return nil -} - -// ContainerStatus sends a ContainerStatusRequest to the server, and parses -// the returned ContainerStatusResponse. -func ContainerStatus(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - r, err := client.ContainerStatus(context.Background(), &pb.ContainerStatusRequest{ - ContainerId: ID}) - if err != nil { - return err - } - fmt.Printf("ID: %s\n", r.Status.Id) - if r.Status.Metadata != nil { - if r.Status.Metadata.Name != "" { - fmt.Printf("Name: %s\n", r.Status.Metadata.Name) - } - fmt.Printf("Attempt: %v\n", r.Status.Metadata.Attempt) - } - // TODO(mzylowski): print it prettier - fmt.Printf("Status: %s\n", r.Status.State) - ctm := time.Unix(0, r.Status.CreatedAt) - fmt.Printf("Created: %v\n", ctm) - stm := time.Unix(0, r.Status.StartedAt) - fmt.Printf("Started: %v\n", stm) - ftm := time.Unix(0, r.Status.FinishedAt) - fmt.Printf("Finished: %v\n", ftm) - fmt.Printf("Exit Code: %v\n", r.Status.ExitCode) - fmt.Printf("Reason: %v\n", r.Status.Reason) - if r.Status.Image != nil { - fmt.Printf("Image: %v\n", r.Status.Image.Image) - } - // - // TODO: https://github.com/kubernetes-incubator/cri-o/issues/531 - // - //fmt.Printf("ImageRef: %v\n", r.Status.ImageRef) - - return nil -} - -// ExecSync sends an ExecSyncRequest to the server, and parses -// the returned ExecSyncResponse. -func ExecSync(client pb.RuntimeServiceClient, ID string, cmd []string, timeout int64) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - r, err := client.ExecSync(context.Background(), &pb.ExecSyncRequest{ - ContainerId: ID, - Cmd: cmd, - Timeout: timeout, - }) - if err != nil { - return err - } - fmt.Println("Stdout:") - fmt.Println(string(r.Stdout)) - fmt.Println("Stderr:") - fmt.Println(string(r.Stderr)) - fmt.Printf("Exit code: %v\n", r.ExitCode) - - return nil -} - -// Exec sends an ExecRequest to the server, and parses -// the returned ExecResponse. 
-func Exec(client pb.RuntimeServiceClient, ID string, tty bool, stdin bool, urlOnly bool, cmd []string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - r, err := client.Exec(context.Background(), &pb.ExecRequest{ - ContainerId: ID, - Cmd: cmd, - Tty: tty, - Stdin: stdin, - }) - if err != nil { - return err - } - - if urlOnly { - fmt.Println("URL:") - fmt.Println(r.Url) - return nil - } - - execURL, err := url.Parse(r.Url) - if err != nil { - return err - } - - streamExec, err := remotecommand.NewExecutor(&restclient.Config{}, "GET", execURL) - if err != nil { - return err - } - - options := remotecommand.StreamOptions{ - SupportedProtocols: remocommandconsts.SupportedStreamingProtocols, - Stdout: os.Stdout, - Stderr: os.Stderr, - Tty: tty, - } - - if stdin { - options.Stdin = os.Stdin - } - - return streamExec.Stream(options) -} - -// ListContainers sends a ListContainerRequest to the server, and parses -// the returned ListContainerResponse. -func ListContainers(client pb.RuntimeServiceClient, opts listOptions) error { - filter := &pb.ContainerFilter{} - if opts.id != "" { - filter.Id = opts.id - } - if opts.podID != "" { - filter.PodSandboxId = opts.podID - } - if opts.state != "" { - st := &pb.ContainerStateValue{} - st.State = pb.ContainerState_CONTAINER_UNKNOWN - switch opts.state { - case "created": - st.State = pb.ContainerState_CONTAINER_CREATED - filter.State = st - case "running": - st.State = pb.ContainerState_CONTAINER_RUNNING - filter.State = st - case "stopped": - st.State = pb.ContainerState_CONTAINER_EXITED - filter.State = st - default: - log.Fatalf("--state should be one of created, running or stopped") - } - } - if opts.labels != nil { - filter.LabelSelector = opts.labels - } - r, err := client.ListContainers(context.Background(), &pb.ListContainersRequest{ - Filter: filter, - }) - if err != nil { - return err - } - for _, c := range r.GetContainers() { - if opts.quiet { - fmt.Println(c.Id) - continue - } - fmt.Printf("ID: %s\n", c.Id) - fmt.Printf("Pod: %s\n", c.PodSandboxId) - if c.Metadata != nil { - if c.Metadata.Name != "" { - fmt.Printf("Name: %s\n", c.Metadata.Name) - } - fmt.Printf("Attempt: %v\n", c.Metadata.Attempt) - } - fmt.Printf("Status: %s\n", c.State) - if c.Image != nil { - fmt.Printf("Image: %s\n", c.Image.Image) - } - ctm := time.Unix(0, c.CreatedAt) - fmt.Printf("Created: %v\n", ctm) - if c.Labels != nil { - fmt.Println("Labels:") - for _, k := range getSortedKeys(c.Labels) { - fmt.Printf("\t%s -> %s\n", k, c.Labels[k]) - } - } - if c.Annotations != nil { - fmt.Println("Annotations:") - for _, k := range getSortedKeys(c.Annotations) { - fmt.Printf("\t%s -> %s\n", k, c.Annotations[k]) - } - } - fmt.Println() - } - return nil -} diff --git a/cmd/crioctl/image.go b/cmd/crioctl/image.go deleted file mode 100644 index 426c67e9..00000000 --- a/cmd/crioctl/image.go +++ /dev/null @@ -1,173 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/urfave/cli" - "golang.org/x/net/context" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" -) - -var imageCommand = cli.Command{ - Name: "image", - Subcommands: []cli.Command{ - pullImageCommand, - listImageCommand, - imageStatusCommand, - removeImageCommand, - }, -} - -var pullImageCommand = cli.Command{ - Name: "pull", - Usage: "pull an image", - Action: func(context *cli.Context) error { - // Set up a connection to the server. 
- conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewImageServiceClient(conn) - - _, err = PullImage(client, context.Args().Get(0)) - if err != nil { - return fmt.Errorf("pulling image failed: %v", err) - } - return nil - }, -} - -var listImageCommand = cli.Command{ - Name: "list", - Usage: "list images", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "quiet", - Usage: "list only image IDs", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewImageServiceClient(conn) - - r, err := ListImages(client, context.Args().Get(0)) - if err != nil { - return fmt.Errorf("listing images failed: %v", err) - } - quiet := context.Bool("quiet") - for _, image := range r.Images { - if quiet { - fmt.Printf("%s\n", image.Id) - continue - } - fmt.Printf("ID: %s\n", image.Id) - for _, tag := range image.RepoTags { - fmt.Printf("Tag: %s\n", tag) - } - for _, digest := range image.RepoDigests { - fmt.Printf("Digest: %s\n", digest) - } - if image.Size_ != 0 { - fmt.Printf("Size: %d\n", image.Size_) - } - } - return nil - }, -} - -var imageStatusCommand = cli.Command{ - Name: "status", - Usage: "return the status of an image", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Usage: "id of the image", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewImageServiceClient(conn) - - r, err := ImageStatus(client, context.String("id")) - if err != nil { - return fmt.Errorf("image status request failed: %v", err) - } - image := r.Image - if image == nil { - return fmt.Errorf("no such image present") - } - fmt.Printf("ID: %s\n", image.Id) - for _, tag := range image.RepoTags { - fmt.Printf("Tag: %s\n", tag) - } - for _, digest := range image.RepoDigests { - fmt.Printf("Digest: %s\n", digest) - } - fmt.Printf("Size: %d\n", image.Size_) - return nil - }, -} -var removeImageCommand = cli.Command{ - Name: "remove", - Usage: "remove an image", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the image", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewImageServiceClient(conn) - - _, err = RemoveImage(client, context.String("id")) - if err != nil { - return fmt.Errorf("removing the image failed: %v", err) - } - return nil - }, -} - -// PullImage sends a PullImageRequest to the server, and parses -// the returned PullImageResponse. -func PullImage(client pb.ImageServiceClient, image string) (*pb.PullImageResponse, error) { - return client.PullImage(context.Background(), &pb.PullImageRequest{Image: &pb.ImageSpec{Image: image}}) -} - -// ListImages sends a ListImagesRequest to the server, and parses -// the returned ListImagesResponse. 
-func ListImages(client pb.ImageServiceClient, image string) (*pb.ListImagesResponse, error) { - return client.ListImages(context.Background(), &pb.ListImagesRequest{Filter: &pb.ImageFilter{Image: &pb.ImageSpec{Image: image}}}) -} - -// ImageStatus sends an ImageStatusRequest to the server, and parses -// the returned ImageStatusResponse. -func ImageStatus(client pb.ImageServiceClient, image string) (*pb.ImageStatusResponse, error) { - return client.ImageStatus(context.Background(), &pb.ImageStatusRequest{Image: &pb.ImageSpec{Image: image}}) -} - -// RemoveImage sends a RemoveImageRequest to the server, and parses -// the returned RemoveImageResponse. -func RemoveImage(client pb.ImageServiceClient, image string) (*pb.RemoveImageResponse, error) { - if image == "" { - return nil, fmt.Errorf("ID cannot be empty") - } - return client.RemoveImage(context.Background(), &pb.RemoveImageRequest{Image: &pb.ImageSpec{Image: image}}) -} diff --git a/cmd/crioctl/main.go b/cmd/crioctl/main.go deleted file mode 100644 index 247906a9..00000000 --- a/cmd/crioctl/main.go +++ /dev/null @@ -1,112 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "net" - "os" - "strings" - "time" - - "github.com/sirupsen/logrus" - "github.com/urfave/cli" - "google.golang.org/grpc" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" -) - -// This is populated by the Makefile from the VERSION file -// in the repository -var version = "" - -// gitCommit is the commit that the binary is being built from. -// It will be populated by the Makefile. -var gitCommit = "" - -func getClientConnection(context *cli.Context) (*grpc.ClientConn, error) { - conn, err := grpc.Dial(context.GlobalString("connect"), grpc.WithInsecure(), grpc.WithTimeout(context.GlobalDuration("timeout")), - grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("unix", addr, timeout) - })) - if err != nil { - return nil, fmt.Errorf("failed to connect: %v", err) - } - return conn, nil -} - -func openFile(path string) (*os.File, error) { - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("config at %s not found", path) - } - return nil, err - } - return f, nil -} - -func loadPodSandboxConfig(path string) (*pb.PodSandboxConfig, error) { - f, err := openFile(path) - if err != nil { - return nil, err - } - defer f.Close() - - var config pb.PodSandboxConfig - if err := json.NewDecoder(f).Decode(&config); err != nil { - return nil, err - } - return &config, nil -} - -func loadContainerConfig(path string) (*pb.ContainerConfig, error) { - f, err := openFile(path) - if err != nil { - return nil, err - } - defer f.Close() - - var config pb.ContainerConfig - if err := json.NewDecoder(f).Decode(&config); err != nil { - return nil, err - } - return &config, nil -} - -func main() { - app := cli.NewApp() - var v []string - if version != "" { - v = append(v, version) - } - if gitCommit != "" { - v = append(v, fmt.Sprintf("commit: %s", gitCommit)) - } - - app.Name = "crioctl" - app.Usage = "client for crio" - app.Version = strings.Join(v, "\n") - - app.Commands = []cli.Command{ - podSandboxCommand, - containerCommand, - runtimeVersionCommand, - imageCommand, - } - - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "connect", - Value: "/var/run/crio.sock", - Usage: "Socket to connect to", - }, - cli.DurationFlag{ - Name: "timeout", - Value: 10 * time.Second, - Usage: "Timeout of connecting to server", - }, - } - - if err := app.Run(os.Args); err != nil { - 
logrus.Fatal(err) - } -} diff --git a/cmd/crioctl/sandbox.go b/cmd/crioctl/sandbox.go deleted file mode 100644 index e44183be..00000000 --- a/cmd/crioctl/sandbox.go +++ /dev/null @@ -1,386 +0,0 @@ -package main - -import ( - "fmt" - "log" - "sort" - "strings" - "time" - - "github.com/urfave/cli" - "golang.org/x/net/context" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" -) - -var podSandboxCommand = cli.Command{ - Name: "pod", - Subcommands: []cli.Command{ - runPodSandboxCommand, - stopPodSandboxCommand, - removePodSandboxCommand, - podSandboxStatusCommand, - listPodSandboxCommand, - }, -} - -var runPodSandboxCommand = cli.Command{ - Name: "run", - Usage: "run a pod", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "config", - Value: "", - Usage: "the path of a pod sandbox config file", - }, - cli.StringFlag{ - Name: "name", - Value: "", - Usage: "the name of the pod sandbox", - }, - cli.StringSliceFlag{ - Name: "label", - Usage: "add key=value labels to the container", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - opts := createOptions{ - configPath: context.String("config"), - name: context.String("name"), - labels: make(map[string]string), - } - - for _, l := range context.StringSlice("label") { - pair := strings.Split(l, "=") - if len(pair) != 2 { - return fmt.Errorf("incorrectly specified label: %v", l) - } - opts.labels[pair[0]] = pair[1] - } - - // Test RuntimeServiceClient.RunPodSandbox - err = RunPodSandbox(client, opts) - if err != nil { - return fmt.Errorf("Creating the pod sandbox failed: %v", err) - } - return nil - }, -} - -var stopPodSandboxCommand = cli.Command{ - Name: "stop", - Usage: "stop a pod sandbox", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the pod sandbox", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = StopPodSandbox(client, context.String("id")) - if err != nil { - return fmt.Errorf("stopping the pod sandbox failed: %v", err) - } - return nil - }, -} - -var removePodSandboxCommand = cli.Command{ - Name: "remove", - Usage: "remove a pod sandbox", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the pod sandbox", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = RemovePodSandbox(client, context.String("id")) - if err != nil { - return fmt.Errorf("removing the pod sandbox failed: %v", err) - } - return nil - }, -} - -var podSandboxStatusCommand = cli.Command{ - Name: "status", - Usage: "return the status of a pod", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the pod", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. 
- conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = PodSandboxStatus(client, context.String("id")) - if err != nil { - return fmt.Errorf("getting the pod sandbox status failed: %v", err) - } - return nil - }, -} - -var listPodSandboxCommand = cli.Command{ - Name: "list", - Usage: "list pod sandboxes", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "filter by pod sandbox id", - }, - cli.StringFlag{ - Name: "state", - Value: "", - Usage: "filter by pod sandbox state", - }, - cli.StringSliceFlag{ - Name: "label", - Usage: "filter by key=value label", - }, - cli.BoolFlag{ - Name: "quiet", - Usage: "list only pod IDs", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - opts := listOptions{ - id: context.String("id"), - state: context.String("state"), - quiet: context.Bool("quiet"), - labels: make(map[string]string), - } - - for _, l := range context.StringSlice("label") { - pair := strings.Split(l, "=") - if len(pair) != 2 { - return fmt.Errorf("incorrectly specified label: %v", l) - } - opts.labels[pair[0]] = pair[1] - } - - err = ListPodSandboxes(client, opts) - if err != nil { - return fmt.Errorf("listing pod sandboxes failed: %v", err) - } - return nil - }, -} - -// RunPodSandbox sends a RunPodSandboxRequest to the server, and parses -// the returned RunPodSandboxResponse. -func RunPodSandbox(client pb.RuntimeServiceClient, opts createOptions) error { - config, err := loadPodSandboxConfig(opts.configPath) - if err != nil { - return err - } - - // Override the name by the one specified through CLI - if opts.name != "" { - config.Metadata.Name = opts.name - } - - for k, v := range opts.labels { - config.Labels[k] = v - } - - r, err := client.RunPodSandbox(context.Background(), &pb.RunPodSandboxRequest{Config: config}) - if err != nil { - return err - } - fmt.Println(r.PodSandboxId) - return nil -} - -// StopPodSandbox sends a StopPodSandboxRequest to the server, and parses -// the returned StopPodSandboxResponse. -func StopPodSandbox(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - _, err := client.StopPodSandbox(context.Background(), &pb.StopPodSandboxRequest{PodSandboxId: ID}) - if err != nil { - return err - } - fmt.Println(ID) - return nil -} - -// RemovePodSandbox sends a RemovePodSandboxRequest to the server, and parses -// the returned RemovePodSandboxResponse. -func RemovePodSandbox(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - _, err := client.RemovePodSandbox(context.Background(), &pb.RemovePodSandboxRequest{PodSandboxId: ID}) - if err != nil { - return err - } - fmt.Println(ID) - return nil -} - -// PodSandboxStatus sends a PodSandboxStatusRequest to the server, and parses -// the returned PodSandboxStatusResponse. 
-func PodSandboxStatus(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - r, err := client.PodSandboxStatus(context.Background(), &pb.PodSandboxStatusRequest{PodSandboxId: ID}) - if err != nil { - return err - } - fmt.Printf("ID: %s\n", r.Status.Id) - if r.Status.Metadata != nil { - if r.Status.Metadata.Name != "" { - fmt.Printf("Name: %s\n", r.Status.Metadata.Name) - } - if r.Status.Metadata.Uid != "" { - fmt.Printf("UID: %s\n", r.Status.Metadata.Uid) - } - if r.Status.Metadata.Namespace != "" { - fmt.Printf("Namespace: %s\n", r.Status.Metadata.Namespace) - } - fmt.Printf("Attempt: %v\n", r.Status.Metadata.Attempt) - } - fmt.Printf("Status: %s\n", r.Status.State) - ctm := time.Unix(0, r.Status.CreatedAt) - fmt.Printf("Created: %v\n", ctm) - if r.Status.Network != nil { - fmt.Printf("IP Address: %v\n", r.Status.Network.Ip) - } - if r.Status.Labels != nil { - fmt.Println("Labels:") - for _, k := range getSortedKeys(r.Status.Labels) { - fmt.Printf("\t%s -> %s\n", k, r.Status.Labels[k]) - } - } - if r.Status.Annotations != nil { - fmt.Println("Annotations:") - for _, k := range getSortedKeys(r.Status.Annotations) { - fmt.Printf("\t%s -> %s\n", k, r.Status.Annotations[k]) - } - } - return nil -} - -// ListPodSandboxes sends a ListPodSandboxRequest to the server, and parses -// the returned ListPodSandboxResponse. -func ListPodSandboxes(client pb.RuntimeServiceClient, opts listOptions) error { - filter := &pb.PodSandboxFilter{} - if opts.id != "" { - filter.Id = opts.id - } - if opts.state != "" { - st := &pb.PodSandboxStateValue{} - st.State = pb.PodSandboxState_SANDBOX_NOTREADY - switch opts.state { - case "ready": - st.State = pb.PodSandboxState_SANDBOX_READY - filter.State = st - case "notready": - st.State = pb.PodSandboxState_SANDBOX_NOTREADY - filter.State = st - default: - log.Fatalf("--state should be ready or notready") - } - } - if opts.labels != nil { - filter.LabelSelector = opts.labels - } - r, err := client.ListPodSandbox(context.Background(), &pb.ListPodSandboxRequest{ - Filter: filter, - }) - if err != nil { - return err - } - for _, pod := range r.Items { - if opts.quiet { - fmt.Println(pod.Id) - continue - } - fmt.Printf("ID: %s\n", pod.Id) - if pod.Metadata != nil { - if pod.Metadata.Name != "" { - fmt.Printf("Name: %s\n", pod.Metadata.Name) - } - if pod.Metadata.Uid != "" { - fmt.Printf("UID: %s\n", pod.Metadata.Uid) - } - if pod.Metadata.Namespace != "" { - fmt.Printf("Namespace: %s\n", pod.Metadata.Namespace) - } - fmt.Printf("Attempt: %v\n", pod.Metadata.Attempt) - } - fmt.Printf("Status: %s\n", pod.State) - ctm := time.Unix(0, pod.CreatedAt) - fmt.Printf("Created: %v\n", ctm) - if pod.Labels != nil { - fmt.Println("Labels:") - for _, k := range getSortedKeys(pod.Labels) { - fmt.Printf("\t%s -> %s\n", k, pod.Labels[k]) - } - } - if pod.Annotations != nil { - fmt.Println("Annotations:") - for _, k := range getSortedKeys(pod.Annotations) { - fmt.Printf("\t%s -> %s\n", k, pod.Annotations[k]) - } - } - fmt.Println() - } - return nil -} - -func getSortedKeys(m map[string]string) []string { - var keys []string - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - return keys -} diff --git a/cmd/crioctl/system.go b/cmd/crioctl/system.go deleted file mode 100644 index 7e04161c..00000000 --- a/cmd/crioctl/system.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/urfave/cli" - "golang.org/x/net/context" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" 
-)
-
-var runtimeVersionCommand = cli.Command{
- Name: "runtimeversion",
- Usage: "get runtime version information",
- Action: func(context *cli.Context) error {
- // Set up a connection to the server.
- conn, err := getClientConnection(context)
- if err != nil {
- return fmt.Errorf("failed to connect: %v", err)
- }
- defer conn.Close()
- client := pb.NewRuntimeServiceClient(conn)
-
- // Test RuntimeServiceClient.Version
- version := "v1alpha1"
- err = Version(client, version)
- if err != nil {
- return fmt.Errorf("Getting the runtime version failed: %v", err)
- }
- return nil
- },
-}
-
-// Version sends a VersionRequest to the server, and parses the returned VersionResponse.
-func Version(client pb.RuntimeServiceClient, version string) error {
- r, err := client.Version(context.Background(), &pb.VersionRequest{Version: version})
- if err != nil {
- return err
- }
- fmt.Printf("VersionResponse: Version: %s, RuntimeName: %s, RuntimeVersion: %s, RuntimeApiVersion: %s\n", r.Version, r.RuntimeName, r.RuntimeVersion, r.RuntimeApiVersion)
- return nil
-}
diff --git a/cmd/kpod/README.md b/cmd/kpod/README.md
deleted file mode 100644
index 7a79e489..00000000
--- a/cmd/kpod/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# kpod - Simple debugging tool for pods and images
-kpod is a simple client-only tool that helps debug issues when daemons such as the CRI runtime and the kubelet are not responding or
-failing. A shared API layer could be created to share code between the daemon and kpod. kpod does not require any daemon to be running. kpod
-utilizes the same underlying components that crio uses, i.e. containers/image, containers/storage, oci-runtime-tool/generate, runc, or
-any other OCI-compatible runtime. kpod shares state with crio and so can debug pods and images created by crio (see the storage sketch after this README).
-
-## Use cases
-1. List pods.
-2. Launch simple pods (that require no daemon support).
-3. Exec commands in a container in a pod.
-4. Launch additional containers in a pod.
-5. List images.
-6. Remove images not in use.
-7. Pull images.
-8. Check image size.
-9. Report pod disk resource usage.
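The daemonless, shared-state design this README describes is what the deleted `cmd/kpod/common.go` below wires up: kpod opens the very containers/storage store that crio writes to and reads images and containers straight from disk. Here is a minimal sketch of that pattern, restricted to the containers/storage calls the deleted code itself uses (`storage.GetStore`, `Store.Images`, `Store.Shutdown`); the graph and run roots are assumed defaults here, whereas kpod takes them from the crio config:

```go
package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	// Start from the library defaults, then point at the store crio uses.
	// These roots are assumed defaults; kpod reads them from the crio config.
	options := storage.DefaultStoreOptions
	options.GraphRoot = "/var/lib/containers/storage"
	options.RunRoot = "/var/run/containers/storage"

	store, err := storage.GetStore(options)
	if err != nil {
		panic(err)
	}
	// Release the store on exit without force-unmounting anything.
	defer store.Shutdown(false)

	// Read image metadata straight from disk -- no daemon RPC involved.
	images, err := store.Images()
	if err != nil {
		panic(err)
	}
	for _, img := range images {
		fmt.Println(img.ID, img.Names)
	}
}
```

Because the store is shared, images and containers that crio created remain visible to such a client even while the daemon is unresponsive, which is what makes the use cases listed above workable.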
diff --git a/cmd/kpod/common.go b/cmd/kpod/common.go deleted file mode 100644 index 1c1503e2..00000000 --- a/cmd/kpod/common.go +++ /dev/null @@ -1,102 +0,0 @@ -package main - -import ( - "os" - "strings" - - is "github.com/containers/image/storage" - "github.com/containers/storage" - "github.com/fatih/camelcase" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/libpod" - "github.com/kubernetes-incubator/cri-o/server" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - stores = make(map[storage.Store]struct{}) -) - -func getStore(c *libkpod.Config) (storage.Store, error) { - options := storage.DefaultStoreOptions - options.GraphRoot = c.Root - options.RunRoot = c.RunRoot - options.GraphDriverName = c.Storage - options.GraphDriverOptions = c.StorageOptions - - store, err := storage.GetStore(options) - if err != nil { - return nil, err - } - is.Transport.SetStore(store) - stores[store] = struct{}{} - return store, nil -} - -func getRuntime(c *cli.Context) (*libpod.Runtime, error) { - - config, err := getConfig(c) - if err != nil { - return nil, errors.Wrapf(err, "could not get config") - } - - options := storage.DefaultStoreOptions - options.GraphRoot = config.Root - options.RunRoot = config.RunRoot - options.GraphDriverName = config.Storage - options.GraphDriverOptions = config.StorageOptions - - return libpod.NewRuntime(libpod.WithStorageConfig(options)) -} - -func shutdownStores() { - for store := range stores { - if _, err := store.Shutdown(false); err != nil { - break - } - } -} - -func getConfig(c *cli.Context) (*libkpod.Config, error) { - config := libkpod.DefaultConfig() - var configFile string - if c.GlobalIsSet("config") { - configFile = c.GlobalString("config") - } else if _, err := os.Stat(server.CrioConfigPath); err == nil { - configFile = server.CrioConfigPath - } - // load and merge the configfile from the commandline or use - // the default crio config file - if configFile != "" { - err := config.UpdateFromFile(configFile) - if err != nil { - return config, err - } - } - if c.GlobalIsSet("root") { - config.Root = c.GlobalString("root") - } - if c.GlobalIsSet("runroot") { - config.RunRoot = c.GlobalString("runroot") - } - - if c.GlobalIsSet("storage-driver") { - config.Storage = c.GlobalString("storage-driver") - } - if c.GlobalIsSet("storage-opt") { - opts := c.GlobalStringSlice("storage-opt") - if len(opts) > 0 { - config.StorageOptions = opts - } - } - if c.GlobalIsSet("runtime") { - config.Runtime = c.GlobalString("runtime") - } - return config, nil -} - -func splitCamelCase(src string) string { - entries := camelcase.Split(src) - return strings.Join(entries, " ") -} diff --git a/cmd/kpod/common_test.go b/cmd/kpod/common_test.go deleted file mode 100644 index 711c8c3e..00000000 --- a/cmd/kpod/common_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package main - -import ( - "os/exec" - "os/user" - "testing" - - "flag" - - "github.com/containers/storage" - "github.com/urfave/cli" -) - -func TestGetStore(t *testing.T) { - // Make sure the tests are running as root - failTestIfNotRoot(t) - - set := flag.NewFlagSet("test", 0) - globalSet := flag.NewFlagSet("test", 0) - globalSet.String("root", "", "path to the root directory in which data, including images, is stored") - globalCtx := cli.NewContext(nil, globalSet, nil) - command := cli.Command{Name: "imagesCommand"} - c := cli.NewContext(nil, set, globalCtx) - c.Command = command - - _, err := getStore(c) - if err != nil { - t.Error(err) - } -} - -func failTestIfNotRoot(t 
*testing.T) { - u, err := user.Current() - if err != nil { - t.Log("Could not determine user. Running without root may cause tests to fail") - } else if u.Uid != "0" { - t.Fatal("tests will fail unless run as root") - } -} - -func getStoreForTests() (storage.Store, error) { - set := flag.NewFlagSet("test", 0) - globalSet := flag.NewFlagSet("test", 0) - globalSet.String("root", "", "path to the root directory in which data, including images, is stored") - globalCtx := cli.NewContext(nil, globalSet, nil) - command := cli.Command{Name: "testCommand"} - c := cli.NewContext(nil, set, globalCtx) - c.Command = command - - return getStore(c) -} - -func pullTestImage(name string) error { - cmd := exec.Command("crioctl", "image", "pull", name) - err := cmd.Run() - if err != nil { - return err - } - return nil -} diff --git a/cmd/kpod/diff.go b/cmd/kpod/diff.go deleted file mode 100644 index fa8c1d56..00000000 --- a/cmd/kpod/diff.go +++ /dev/null @@ -1,128 +0,0 @@ -package main - -import ( - "fmt" - "github.com/containers/storage/pkg/archive" - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -type diffJSONOutput struct { - Changed []string `json:"changed,omitempty"` - Added []string `json:"added,omitempty"` - Deleted []string `json:"deleted,omitempty"` -} - -type diffOutputParams struct { - Change archive.ChangeType - Path string -} - -type stdoutStruct struct { - output []diffOutputParams -} - -func (so stdoutStruct) Out() error { - for _, d := range so.output { - fmt.Printf("%s %s\n", d.Change, d.Path) - } - return nil -} - -var ( - diffFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "archive", - Usage: "Save the diff as a tar archive", - Hidden: true, - }, - cli.StringFlag{ - Name: "format", - Usage: "Change the output format.", - }, - } - diffDescription = fmt.Sprint(`Displays changes on a container or image's filesystem. 
The - container or image will be compared to its parent layer`) - - diffCommand = cli.Command{ - Name: "diff", - Usage: "Inspect changes on container's file systems", - Description: diffDescription, - Flags: diffFlags, - Action: diffCmd, - ArgsUsage: "ID-NAME", - } -) - -func formatJSON(output []diffOutputParams) (diffJSONOutput, error) { - jsonStruct := diffJSONOutput{} - for _, output := range output { - switch output.Change { - case archive.ChangeModify: - jsonStruct.Changed = append(jsonStruct.Changed, output.Path) - case archive.ChangeAdd: - jsonStruct.Added = append(jsonStruct.Added, output.Path) - case archive.ChangeDelete: - jsonStruct.Deleted = append(jsonStruct.Deleted, output.Path) - default: - return jsonStruct, errors.Errorf("output kind %q not recognized", output.Change.String()) - } - } - return jsonStruct, nil -} - -func diffCmd(c *cli.Context) error { - if len(c.Args()) != 1 { - return errors.Errorf("container, layer, or image name must be specified: kpod diff [options [...]] ID-NAME") - } - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not get container server") - } - defer server.Shutdown() - - to := c.Args().Get(0) - changes, err := server.GetDiff("", to) - if err != nil { - return errors.Wrapf(err, "could not get changes for %q", to) - } - - diffOutput := []diffOutputParams{} - outputFormat := c.String("format") - - for _, change := range changes { - - params := diffOutputParams{ - Change: change.Kind, - Path: change.Path, - } - diffOutput = append(diffOutput, params) - } - - var out formats.Writer - - if outputFormat != "" { - switch outputFormat { - case formats.JSONString: - data, err := formatJSON(diffOutput) - if err != nil { - return err - } - out = formats.JSONStruct{Output: data} - default: - return errors.New("only valid format for diff is 'json'") - } - } else { - out = stdoutStruct{output: diffOutput} - } - formats.Writer(out).Out() - - return nil -} diff --git a/cmd/kpod/docker/types.go b/cmd/kpod/docker/types.go deleted file mode 100644 index a7e45655..00000000 --- a/cmd/kpod/docker/types.go +++ /dev/null @@ -1,271 +0,0 @@ -package docker - -// -// Types extracted from Docker -// - -import ( - "time" - - "github.com/containers/image/pkg/strslice" - "github.com/opencontainers/go-digest" -) - -// TypeLayers github.com/docker/docker/image/rootfs.go -const TypeLayers = "layers" - -// V2S2MediaTypeManifest github.com/docker/distribution/manifest/schema2/manifest.go -const V2S2MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" - -// V2S2MediaTypeImageConfig github.com/docker/distribution/manifest/schema2/manifest.go -const V2S2MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json" - -// V2S2MediaTypeLayer github.com/docker/distribution/manifest/schema2/manifest.go -const V2S2MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" - -// V2S2MediaTypeUncompressedLayer github.com/docker/distribution/manifest/schema2/manifest.go -const V2S2MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar" - -// V2S2RootFS describes images root filesystem -// This is currently a placeholder that only supports layers. In the future -// this can be made into an interface that supports different implementations. 
-// github.com/docker/docker/image/rootfs.go -type V2S2RootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` -} - -// V2S2History stores build commands that were used to create an image -// github.com/docker/docker/image/image.go -type V2S2History struct { - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building the image - CreatedBy string `json:"created_by,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// ID is the content-addressable ID of an image. -// github.com/docker/docker/image/image.go -type ID digest.Digest - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -// github.com/docker/docker/api/types/container/config.go -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// PortSet is a collection of structs indexed by Port -// github.com/docker/go-connections/nat/nat.go -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -// github.com/docker/go-connections/nat/nat.go -type Port string - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -// github.com/docker/docker/api/types/container/config.go -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
- Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// V1Compatibility - For non-top-level layers, create fake V1Compatibility -// strings that fit the format and don't collide with anything else, but -// don't result in runnable images on their own. -// github.com/docker/distribution/manifest/schema1/config_builder.go -type V1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig struct { - Cmd []string - } `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` -} - -// V1Image stores the V1 image configuration. 
-// github.com/docker/docker/image/image.go -type V1Image struct { - // ID is a unique 64 character identifier of the image - ID string `json:"id,omitempty"` - // Parent is the ID of the parent image - Parent string `json:"parent,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Container is the id of the container used to commit - Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig Config `json:"container_config,omitempty"` - // DockerVersion specifies the version of Docker that was used to build the image - DockerVersion string `json:"docker_version,omitempty"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *Config `json:"config,omitempty"` - // Architecture is the hardware that the image is build and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` - // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` -} - -// V2Image stores the image configuration -// github.com/docker/docker/image/image.go -type V2Image struct { - V1Image - Parent ID `json:"parent,omitempty"` - RootFS *V2S2RootFS `json:"rootfs,omitempty"` - History []V2S2History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - - // rawJSON caches the immutable JSON associated with this image. - //rawJSON []byte - - // computedID is the ID computed from the hash of the image config. - // Not to be confused with the legacy V1 ID in V1Image. - //computedID ID -} - -// V2Versioned provides a struct with the manifest schemaVersion and mediaType. -// Incoming content with unknown schema version can be decoded against this -// struct to check the version. -// github.com/docker/distribution/manifest/versioned.go -type V2Versioned struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` - - // MediaType is the media type of this schema. - MediaType string `json:"mediaType,omitempty"` -} - -// V2S1FSLayer is a container struct for BlobSums defined in an image manifest -// github.com/docker/distribution/manifest/schema1/manifest.go -type V2S1FSLayer struct { - // BlobSum is the tarsum of the referenced filesystem image layer - BlobSum digest.Digest `json:"blobSum"` -} - -// V2S1History stores unstructured v1 compatibility information -// github.com/docker/distribution/manifest/schema1/manifest.go -type V2S1History struct { - // V1Compatibility is the raw v1 compatibility information - V1Compatibility string `json:"v1Compatibility"` -} - -// V2S1Manifest provides the base accessible fields for working with V2 image -// format in the registry. 
-// github.com/docker/distribution/manifest/schema1/manifest.go -type V2S1Manifest struct { - V2Versioned - - // Name is the name of the image's repository - Name string `json:"name"` - - // Tag is the tag of the image specified by this manifest - Tag string `json:"tag"` - - // Architecture is the host architecture on which this image is intended to - // run - Architecture string `json:"architecture"` - - // FSLayers is a list of filesystem layer blobSums contained in this image - FSLayers []V2S1FSLayer `json:"fsLayers"` - - // History is a list of unstructured historical data for v1 compatibility - History []V2S1History `json:"history"` -} - -// V2S2Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -// github.com/docker/distribution/blobs.go -type V2S2Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -// V2S2Manifest defines a schema2 manifest. -// github.com/docker/distribution/manifest/schema2/manifest.go -type V2S2Manifest struct { - V2Versioned - - // Config references the image configuration as a blob. - Config V2S2Descriptor `json:"config"` - - // Layers lists descriptors for the layers referenced by the - // configuration. - Layers []V2S2Descriptor `json:"layers"` -} diff --git a/cmd/kpod/export.go b/cmd/kpod/export.go deleted file mode 100644 index 53eb5080..00000000 --- a/cmd/kpod/export.go +++ /dev/null @@ -1,103 +0,0 @@ -package main - -import ( - "io" - "os" - - "fmt" - - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -type exportOptions struct { - output string - container string -} - -var ( - exportFlags = []cli.Flag{ - cli.StringFlag{ - Name: "output, o", - Usage: "Write to a file, default is STDOUT", - Value: "/dev/stdout", - }, - } - exportDescription = "Exports container's filesystem contents as a tar archive" + - " and saves it on the local machine." 
- exportCommand = cli.Command{ - Name: "export", - Usage: "Export container's filesystem contents as a tar archive", - Description: exportDescription, - Flags: exportFlags, - Action: exportCmd, - ArgsUsage: "CONTAINER", - } -) - -// exportCmd saves a container to a tarball on disk -func exportCmd(c *cli.Context) error { - args := c.Args() - if len(args) == 0 { - return errors.Errorf("container id must be specified") - } - if len(args) > 1 { - return errors.Errorf("too many arguments given, need 1 at most.") - } - container := args[0] - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - store, err := getStore(config) - if err != nil { - return err - } - - output := c.String("output") - if output == "/dev/stdout" { - file := os.Stdout - if logrus.IsTerminal(file) { - return errors.Errorf("refusing to export to terminal. Use -o flag or redirect") - } - } - - opts := exportOptions{ - output: output, - container: container, - } - - return exportContainer(store, opts) -} - -// exportContainer exports the contents of a container and saves it as -// a tarball on disk -func exportContainer(store storage.Store, opts exportOptions) error { - mountPoint, err := store.Mount(opts.container, "") - if err != nil { - return errors.Wrapf(err, "error finding container %q", opts.container) - } - defer func() { - if err := store.Unmount(opts.container); err != nil { - fmt.Printf("error unmounting container %q: %v\n", opts.container, err) - } - }() - - input, err := archive.Tar(mountPoint, archive.Uncompressed) - if err != nil { - return errors.Wrapf(err, "error reading container directory %q", opts.container) - } - - outFile, err := os.Create(opts.output) - if err != nil { - return errors.Wrapf(err, "error creating file %q", opts.output) - } - defer outFile.Close() - - _, err = io.Copy(outFile, input) - return err -} diff --git a/cmd/kpod/formats/formats.go b/cmd/kpod/formats/formats.go deleted file mode 100644 index 007f09c6..00000000 --- a/cmd/kpod/formats/formats.go +++ /dev/null @@ -1,132 +0,0 @@ -package formats - -import ( - "encoding/json" - "fmt" - "os" - "strings" - "text/tabwriter" - "text/template" - - "github.com/ghodss/yaml" - "github.com/pkg/errors" -) - -const ( - // JSONString const to save on duplicate variable names - JSONString = "json" - // IDString const to save on duplicates for Go templates - IDString = "{{.ID}}" -) - -// Writer interface for outputs -type Writer interface { - Out() error -} - -// JSONStructArray for JSON output -type JSONStructArray struct { - Output []interface{} -} - -// StdoutTemplateArray for Go template output -type StdoutTemplateArray struct { - Output []interface{} - Template string - Fields map[string]string -} - -// JSONStruct for JSON output -type JSONStruct struct { - Output interface{} -} - -// StdoutTemplate for Go template output -type StdoutTemplate struct { - Output interface{} - Template string - Fields map[string]string -} - -// YAMLStruct for YAML output -type YAMLStruct struct { - Output interface{} -} - -// Out method for JSON Arrays -func (j JSONStructArray) Out() error { - data, err := json.MarshalIndent(j.Output, "", " ") - if err != nil { - return err - } - fmt.Printf("%s\n", data) - return nil -} - -// Out method for Go templates -func (t StdoutTemplateArray) Out() error { - w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0) - if strings.HasPrefix(t.Template, "table") { - // replace any spaces with tabs in template so that tabwriter can align it - t.Template = 
strings.Replace(strings.TrimSpace(t.Template[5:]), " ", "\t", -1) - headerTmpl, err := template.New("header").Funcs(headerFunctions).Parse(t.Template) - if err != nil { - return errors.Wrapf(err, "Template parsing error") - } - err = headerTmpl.Execute(w, t.Fields) - if err != nil { - return err - } - fmt.Fprintln(w, "") - } - t.Template = strings.Replace(t.Template, " ", "\t", -1) - tmpl, err := template.New("image").Funcs(basicFunctions).Parse(t.Template) - if err != nil { - return errors.Wrapf(err, "Template parsing error") - } - for _, img := range t.Output { - basicTmpl := tmpl.Funcs(basicFunctions) - err = basicTmpl.Execute(w, img) - if err != nil { - return err - } - fmt.Fprintln(w, "") - } - return w.Flush() -} - -// Out method for JSON struct -func (j JSONStruct) Out() error { - data, err := json.MarshalIndent(j.Output, "", " ") - if err != nil { - return err - } - fmt.Printf("%s\n", data) - return nil -} - -//Out method for Go templates -func (t StdoutTemplate) Out() error { - tmpl, err := template.New("image").Parse(t.Template) - if err != nil { - return errors.Wrapf(err, "template parsing error") - } - err = tmpl.Execute(os.Stdout, t.Output) - if err != nil { - return err - } - fmt.Println() - return nil -} - -// Out method for YAML -func (y YAMLStruct) Out() error { - var buf []byte - var err error - buf, err = yaml.Marshal(y.Output) - if err != nil { - return err - } - fmt.Println(string(buf)) - return nil -} diff --git a/cmd/kpod/history.go b/cmd/kpod/history.go deleted file mode 100644 index f85cd134..00000000 --- a/cmd/kpod/history.go +++ /dev/null @@ -1,272 +0,0 @@ -package main - -import ( - "reflect" - "strconv" - "strings" - "time" - - is "github.com/containers/image/storage" - "github.com/containers/image/types" - "github.com/containers/storage" - units "github.com/docker/go-units" - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/kubernetes-incubator/cri-o/libpod/common" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -const ( - createdByTruncLength = 45 - idTruncLength = 13 -) - -// historyTemplateParams stores info about each layer -type historyTemplateParams struct { - ID string - Created string - CreatedBy string - Size string - Comment string -} - -// historyJSONParams is only used when the JSON format is specified, -// and is better for data processing from JSON. -// historyJSONParams will be populated by data from v1.History and types.BlobInfo, -// the members of the struct are the sama data types as their sources. -type historyJSONParams struct { - ID string `json:"id"` - Created *time.Time `json:"created"` - CreatedBy string `json:"createdBy"` - Size int64 `json:"size"` - Comment string `json:"comment"` -} - -// historyOptions stores cli flag values -type historyOptions struct { - image string - human bool - noTrunc bool - quiet bool - format string -} - -var ( - historyFlags = []cli.Flag{ - cli.BoolTFlag{ - Name: "human, H", - Usage: "Display sizes and dates in human readable format", - }, - cli.BoolFlag{ - Name: "no-trunc, notruncate", - Usage: "Do not truncate the output", - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "Display the numeric IDs only", - }, - cli.StringFlag{ - Name: "format", - Usage: "Change the output to JSON or a Go template", - }, - } - - historyDescription = "Displays the history of an image. The information can be printed out in an easy to read, " + - "or user specified format, and can be truncated." 
- historyCommand = cli.Command{ - Name: "history", - Usage: "Show history of a specified image", - Description: historyDescription, - Flags: historyFlags, - Action: historyCmd, - ArgsUsage: "", - } -) - -func historyCmd(c *cli.Context) error { - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - store, err := getStore(config) - if err != nil { - return err - } - - format := genHistoryFormat(c.Bool("quiet")) - if c.IsSet("format") { - format = c.String("format") - } - - args := c.Args() - if len(args) == 0 { - return errors.Errorf("an image name must be specified") - } - if len(args) > 1 { - return errors.Errorf("Kpod history takes at most 1 argument") - } - imgName := args[0] - - opts := historyOptions{ - image: imgName, - human: c.BoolT("human"), - noTrunc: c.Bool("no-trunc"), - quiet: c.Bool("quiet"), - format: format, - } - return generateHistoryOutput(store, opts) -} - -func genHistoryFormat(quiet bool) (format string) { - if quiet { - return formats.IDString - } - return "table {{.ID}}\t{{.Created}}\t{{.CreatedBy}}\t{{.Size}}\t{{.Comment}}\t" -} - -// historyToGeneric makes an empty array of interfaces for output -func historyToGeneric(templParams []historyTemplateParams, JSONParams []historyJSONParams) (genericParams []interface{}) { - if len(templParams) > 0 { - for _, v := range templParams { - genericParams = append(genericParams, interface{}(v)) - } - return - } - for _, v := range JSONParams { - genericParams = append(genericParams, interface{}(v)) - } - return -} - -// generate the header based on the template provided -func (h *historyTemplateParams) headerMap() map[string]string { - v := reflect.Indirect(reflect.ValueOf(h)) - values := make(map[string]string) - for h := 0; h < v.NumField(); h++ { - key := v.Type().Field(h).Name - value := key - values[key] = strings.ToUpper(splitCamelCase(value)) - } - return values -} - -// getHistory gets the history of an image and information about its layers -func getHistory(store storage.Store, image string) ([]v1.History, []types.BlobInfo, string, error) { - ref, err := is.Transport.ParseStoreReference(store, image) - if err != nil { - return nil, nil, "", errors.Wrapf(err, "error parsing reference to image %q", image) - } - - img, err := is.Transport.GetStoreImage(store, ref) - if err != nil { - return nil, nil, "", errors.Wrapf(err, "no such image %q", image) - } - - systemContext := common.GetSystemContext("") - - src, err := ref.NewImage(systemContext) - if err != nil { - return nil, nil, "", errors.Wrapf(err, "error instantiating image %q", image) - } - - oci, err := src.OCIConfig() - if err != nil { - return nil, nil, "", err - } - - return oci.History, src.LayerInfos(), img.ID, nil -} - -// getHistorytemplateOutput gets the modified history information to be printed in human readable format -func getHistoryTemplateOutput(history []v1.History, layers []types.BlobInfo, imageID string, opts historyOptions) (historyOutput []historyTemplateParams) { - var ( - outputSize string - createdTime string - createdBy string - count = 1 - ) - for i := len(history) - 1; i >= 0; i-- { - if i != len(history)-1 { - imageID = "" - } - if !opts.noTrunc && i == len(history)-1 { - imageID = imageID[:idTruncLength] - } - - var size int64 - if !history[i].EmptyLayer { - size = layers[len(layers)-count].Size - count++ - } - - if opts.human { - createdTime = units.HumanDuration(time.Since((*history[i].Created))) + " ago" - outputSize = units.HumanSize(float64(size)) - } else { - createdTime = 
(history[i].Created).Format(time.RFC3339) - outputSize = strconv.FormatInt(size, 10) - } - - createdBy = strings.Join(strings.Fields(history[i].CreatedBy), " ") - if !opts.noTrunc && len(createdBy) > createdByTruncLength { - createdBy = createdBy[:createdByTruncLength-3] + "..." - } - - params := historyTemplateParams{ - ID: imageID, - Created: createdTime, - CreatedBy: createdBy, - Size: outputSize, - Comment: history[i].Comment, - } - historyOutput = append(historyOutput, params) - } - return -} - -// getHistoryJSONOutput returns the history information in its raw form -func getHistoryJSONOutput(history []v1.History, layers []types.BlobInfo, imageID string) (historyOutput []historyJSONParams) { - count := 1 - for i := len(history) - 1; i >= 0; i-- { - var size int64 - if !history[i].EmptyLayer { - size = layers[len(layers)-count].Size - count++ - } - - params := historyJSONParams{ - ID: imageID, - Created: history[i].Created, - CreatedBy: history[i].CreatedBy, - Size: size, - Comment: history[i].Comment, - } - historyOutput = append(historyOutput, params) - } - return -} - -// generateHistoryOutput generates the history based on the format given -func generateHistoryOutput(store storage.Store, opts historyOptions) error { - history, layers, imageID, err := getHistory(store, opts.image) - if err != nil { - return errors.Wrapf(err, "error getting history of image %q", opts.image) - } - if len(history) == 0 { - return nil - } - - var out formats.Writer - - switch opts.format { - case formats.JSONString: - historyOutput := getHistoryJSONOutput(history, layers, imageID) - out = formats.JSONStructArray{Output: historyToGeneric([]historyTemplateParams{}, historyOutput)} - default: - historyOutput := getHistoryTemplateOutput(history, layers, imageID, opts) - out = formats.StdoutTemplateArray{Output: historyToGeneric(historyOutput, []historyJSONParams{}), Template: opts.format, Fields: historyOutput[0].headerMap()} - } - - return formats.Writer(out).Out() -} diff --git a/cmd/kpod/images.go b/cmd/kpod/images.go deleted file mode 100644 index e1eb7660..00000000 --- a/cmd/kpod/images.go +++ /dev/null @@ -1,206 +0,0 @@ -package main - -import ( - "reflect" - "strings" - - "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - libpod "github.com/kubernetes-incubator/cri-o/libpod/images" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - imagesFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "quiet, q", - Usage: "display only image IDs", - }, - cli.BoolFlag{ - Name: "noheading, n", - Usage: "do not print column headings", - }, - cli.BoolFlag{ - Name: "no-trunc, notruncate", - Usage: "do not truncate output", - }, - cli.BoolFlag{ - Name: "digests", - Usage: "show digests", - }, - cli.StringFlag{ - Name: "format", - Usage: "Change the output format to JSON or a Go template", - }, - cli.StringFlag{ - Name: "filter, f", - Usage: "filter output based on conditions provided (default [])", - }, - } - - imagesDescription = "lists locally stored images." 
- imagesCommand = cli.Command{ - Name: "images", - Usage: "list images in local storage", - Description: imagesDescription, - Flags: imagesFlags, - Action: imagesCmd, - ArgsUsage: "", - } -) - -func imagesCmd(c *cli.Context) error { - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - store, err := getStore(config) - if err != nil { - return err - } - - quiet := false - if c.IsSet("quiet") { - quiet = c.Bool("quiet") - } - noheading := false - if c.IsSet("noheading") { - noheading = c.Bool("noheading") - } - truncate := true - if c.IsSet("no-trunc") { - truncate = !c.Bool("no-trunc") - } - digests := false - if c.IsSet("digests") { - digests = c.Bool("digests") - } - outputFormat := genImagesFormat(quiet, truncate, digests) - if c.IsSet("format") { - outputFormat = c.String("format") - } - - name := "" - if len(c.Args()) == 1 { - name = c.Args().Get(0) - } else if len(c.Args()) > 1 { - return errors.New("'kpod images' requires at most 1 argument") - } - - var params *libpod.FilterParams - if c.IsSet("filter") { - params, err = libpod.ParseFilter(store, c.String("filter")) - if err != nil { - return errors.Wrapf(err, "error parsing filter") - } - } else { - params = nil - } - - imageList, err := libpod.GetImagesMatchingFilter(store, params, name) - if err != nil { - return errors.Wrapf(err, "could not get list of images matching filter") - } - - return outputImages(store, imageList, truncate, digests, quiet, outputFormat, noheading) -} - -func genImagesFormat(quiet, truncate, digests bool) (format string) { - if quiet { - return formats.IDString - } - if truncate { - format = "table {{ .ID | printf \"%-20.12s\" }} " - } else { - format = "table {{ .ID | printf \"%-64s\" }} " - } - format += "{{ .Name | printf \"%-56s\" }} " - - if digests { - format += "{{ .Digest | printf \"%-71s \"}} " - } - - format += "{{ .CreatedAt | printf \"%-22s\" }} {{.Size}}" - return -} - -func outputImages(store storage.Store, images []storage.Image, truncate, digests, quiet bool, outputFormat string, noheading bool) error { - imageOutput := []imageOutputParams{} - - lastID := "" - for _, img := range images { - if quiet && lastID == img.ID { - continue // quiet should not show the same ID multiple times - } - createdTime := img.Created - - names := []string{""} - if len(img.Names) > 0 { - names = img.Names - } - - info, imageDigest, size, _ := libpod.InfoAndDigestAndSize(store, img) - if info != nil { - createdTime = info.Created - } - - params := imageOutputParams{ - ID: img.ID, - Name: names, - Digest: imageDigest, - CreatedAt: createdTime.Format("Jan 2, 2006 15:04"), - Size: libpod.FormattedSize(float64(size)), - } - imageOutput = append(imageOutput, params) - } - - var out formats.Writer - - switch outputFormat { - case formats.JSONString: - out = formats.JSONStructArray{Output: toGeneric(imageOutput)} - default: - if len(imageOutput) == 0 { - out = formats.StdoutTemplateArray{} - } else { - out = formats.StdoutTemplateArray{Output: toGeneric(imageOutput), Template: outputFormat, Fields: imageOutput[0].headerMap()} - } - } - - formats.Writer(out).Out() - - return nil -} - -type imageOutputParams struct { - ID string `json:"id"` - Name []string `json:"names"` - Digest digest.Digest `json:"digest"` - CreatedAt string `json:"created"` - Size string `json:"size"` -} - -func toGeneric(params []imageOutputParams) []interface{} { - genericParams := make([]interface{}, len(params)) - for i, v := range params { - genericParams[i] = interface{}(v) - } - return 
genericParams -} - -func (i *imageOutputParams) headerMap() map[string]string { - v := reflect.Indirect(reflect.ValueOf(i)) - values := make(map[string]string) - - for i := 0; i < v.NumField(); i++ { - key := v.Type().Field(i).Name - value := key - if value == "ID" || value == "Name" { - value = "Image" + value - } - values[key] = strings.ToUpper(splitCamelCase(value)) - } - return values -} diff --git a/cmd/kpod/info.go b/cmd/kpod/info.go deleted file mode 100644 index d641a7e0..00000000 --- a/cmd/kpod/info.go +++ /dev/null @@ -1,195 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "runtime" - - "github.com/docker/docker/pkg/system" - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - infoDescription = "display system information" - infoCommand = cli.Command{ - Name: "info", - Usage: infoDescription, - Description: `Information display here pertain to the host, current storage stats, and build of kpod. Useful for the user and when reporting issues.`, - Flags: infoFlags, - Action: infoCmd, - ArgsUsage: "", - } - infoFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "debug, D", - Usage: "display additional debug information", - }, - cli.StringFlag{ - Name: "format", - Usage: "Change the output format to JSON or a Go template", - }, - } -) - -func infoCmd(c *cli.Context) error { - info := map[string]interface{}{} - - infoGivers := []infoGiverFunc{ - storeInfo, - hostInfo, - } - - if c.Bool("debug") { - infoGivers = append(infoGivers, debugInfo) - } - - for _, giver := range infoGivers { - thisName, thisInfo, err := giver(c) - if err != nil { - info[thisName] = infoErr(err) - continue - } - info[thisName] = thisInfo - } - - var out formats.Writer - infoOutputFormat := c.String("format") - switch infoOutputFormat { - case formats.JSONString: - out = formats.JSONStruct{Output: info} - case "": - out = formats.YAMLStruct{Output: info} - default: - out = formats.StdoutTemplate{Output: info, Template: infoOutputFormat} - } - - formats.Writer(out).Out() - - return nil -} - -func infoErr(err error) map[string]interface{} { - return map[string]interface{}{ - "error": err.Error(), - } -} - -type infoGiverFunc func(c *cli.Context) (name string, info map[string]interface{}, err error) - -// top-level "debug" info -func debugInfo(c *cli.Context) (string, map[string]interface{}, error) { - info := map[string]interface{}{} - info["compiler"] = runtime.Compiler - info["go version"] = runtime.Version() - return "debug", info, nil -} - -// top-level "host" info -func hostInfo(c *cli.Context) (string, map[string]interface{}, error) { - // lets say OS, arch, number of cpus, amount of memory, maybe os distribution/version, hostname, kernel version, uptime - info := map[string]interface{}{} - info["os"] = runtime.GOOS - info["arch"] = runtime.GOARCH - info["cpus"] = runtime.NumCPU() - mi, err := system.ReadMemInfo() - if err != nil { - info["meminfo"] = infoErr(err) - } else { - // TODO this might be a place for github.com/dustin/go-humanize - info["MemTotal"] = mi.MemTotal - info["MemFree"] = mi.MemFree - info["SwapTotal"] = mi.SwapTotal - info["SwapFree"] = mi.SwapFree - } - if kv, err := readKernelVersion(); err != nil { - info["kernel"] = infoErr(err) - } else { - info["kernel"] = kv - } - - if up, err := readUptime(); err != nil { - info["uptime"] = infoErr(err) - } else { - info["uptime"] = up - } - if host, err := os.Hostname(); err != nil { - info["hostname"] = infoErr(err) - } else { - info["hostname"] = host - } 
- return "host", info, nil -} - -// top-level "store" info -func storeInfo(c *cli.Context) (string, map[string]interface{}, error) { - storeStr := "store" - config, err := getConfig(c) - if err != nil { - return storeStr, nil, errors.Wrapf(err, "Could not get config") - } - store, err := getStore(config) - if err != nil { - return storeStr, nil, err - } - - // lets say storage driver in use, number of images, number of containers - info := map[string]interface{}{} - info["GraphRoot"] = store.GraphRoot() - info["RunRoot"] = store.RunRoot() - info["GraphDriverName"] = store.GraphDriverName() - info["GraphOptions"] = store.GraphOptions() - statusPairs, err := store.Status() - if err != nil { - return storeStr, nil, err - } - status := map[string]string{} - for _, pair := range statusPairs { - status[pair[0]] = pair[1] - } - info["GraphStatus"] = status - images, err := store.Images() - if err != nil { - info["ImageStore"] = infoErr(err) - } else { - info["ImageStore"] = map[string]interface{}{ - "number": len(images), - } - } - containers, err := store.Containers() - if err != nil { - info["ContainerStore"] = infoErr(err) - } else { - info["ContainerStore"] = map[string]interface{}{ - "number": len(containers), - } - } - return storeStr, info, nil -} - -func readKernelVersion() (string, error) { - buf, err := ioutil.ReadFile("/proc/version") - if err != nil { - return "", err - } - f := bytes.Fields(buf) - if len(f) < 2 { - return string(bytes.TrimSpace(buf)), nil - } - return string(f[2]), nil -} - -func readUptime() (string, error) { - buf, err := ioutil.ReadFile("/proc/uptime") - if err != nil { - return "", err - } - f := bytes.Fields(buf) - if len(f) < 1 { - return "", fmt.Errorf("invalid uptime") - } - return string(f[0]), nil -} diff --git a/cmd/kpod/inspect.go b/cmd/kpod/inspect.go deleted file mode 100644 index d3411d3d..00000000 --- a/cmd/kpod/inspect.go +++ /dev/null @@ -1,117 +0,0 @@ -package main - -import ( - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/libpod/images" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -const ( - inspectTypeContainer = "container" - inspectTypeImage = "image" - inspectAll = "all" -) - -var ( - inspectFlags = []cli.Flag{ - cli.StringFlag{ - Name: "type, t", - Value: inspectAll, - Usage: "Return JSON for specified type, (e.g image, container or task)", - }, - cli.StringFlag{ - Name: "format, f", - Usage: "Change the output format to a Go template", - }, - cli.BoolFlag{ - Name: "size", - Usage: "Display total file size if the type is container", - }, - } - inspectDescription = "This displays the low-level information on containers and images identified by name or ID. By default, this will render all results in a JSON array. If the container and image have the same name, this will return container JSON for unspecified type." 
-	inspectCommand = cli.Command{
-		Name:        "inspect",
-		Usage:       "Displays the configuration of a container or image",
-		Description: inspectDescription,
-		Flags:       inspectFlags,
-		Action:      inspectCmd,
-		ArgsUsage:   "CONTAINER-OR-IMAGE",
-	}
-)
-
-func inspectCmd(c *cli.Context) error {
-	args := c.Args()
-	if len(args) == 0 {
-		return errors.Errorf("container or image name must be specified: kpod inspect [options [...]] name")
-	}
-	if len(args) > 1 {
-		return errors.Errorf("too many arguments specified")
-	}
-
-	itemType := c.String("type")
-	size := c.Bool("size")
-
-	switch itemType {
-	case inspectTypeContainer:
-	case inspectTypeImage:
-	case inspectAll:
-	default:
-		return errors.Errorf("the only recognized types are %q, %q, and %q", inspectTypeContainer, inspectTypeImage, inspectAll)
-	}
-
-	name := args[0]
-
-	config, err := getConfig(c)
-	if err != nil {
-		return errors.Wrapf(err, "Could not get config")
-	}
-	server, err := libkpod.New(config)
-	if err != nil {
-		return errors.Wrapf(err, "could not get container server")
-	}
-	defer server.Shutdown()
-	if err = server.Update(); err != nil {
-		return errors.Wrapf(err, "could not update list of containers")
-	}
-
-	outputFormat := c.String("format")
-	var data interface{}
-	switch itemType {
-	case inspectTypeContainer:
-		data, err = server.GetContainerData(name, size)
-		if err != nil {
-			return errors.Wrapf(err, "error parsing container data")
-		}
-	case inspectTypeImage:
-		data, err = images.GetData(server.Store(), name)
-		if err != nil {
-			return errors.Wrapf(err, "error parsing image data")
-		}
-	case inspectAll:
-		ctrData, err := server.GetContainerData(name, size)
-		if err != nil {
-			imgData, err := images.GetData(server.Store(), name)
-			if err != nil {
-				return errors.Wrapf(err, "error parsing container or image data")
-			}
-			data = imgData
-
-		} else {
-			data = ctrData
-		}
-	}
-
-	var out formats.Writer
-	if outputFormat != "" && outputFormat != formats.JSONString {
-		//template
-		out = formats.StdoutTemplate{Output: data, Template: outputFormat}
-	} else {
-		// default is json output
-		out = formats.JSONStruct{Output: data}
-	}
-
-	formats.Writer(out).Out()
-	return nil
-}
diff --git a/cmd/kpod/load.go b/cmd/kpod/load.go
deleted file mode 100644
index f4b75941..00000000
--- a/cmd/kpod/load.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package main
-
-import (
-	"io"
-	"os"
-
-	"io/ioutil"
-
-	"github.com/containers/storage"
-	"github.com/kubernetes-incubator/cri-o/libpod/common"
-	"github.com/kubernetes-incubator/cri-o/libpod/images"
-	"github.com/pkg/errors"
-	"github.com/urfave/cli"
-)
-
-type loadOptions struct {
-	input string
-	quiet bool
-}
-
-var (
-	loadFlags = []cli.Flag{
-		cli.StringFlag{
-			Name:  "input, i",
-			Usage: "Read from archive file, default is STDIN",
-			Value: "/dev/stdin",
-		},
-		cli.BoolFlag{
-			Name:  "quiet, q",
-			Usage: "Suppress the output",
-		},
-	}
-	loadDescription = "Loads the image from docker-archive stored on the local machine."
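	// Editorial sketch, not part of the original diff: how loadCmd below
	// funnels a tarball into the regular pull path. The name loadExample is
	// hypothetical; dockerArchive ("docker-archive:") is the constant defined
	// in save.go, and PullImage is the same helper the deleted code calls,
	// with the same argument order.
	loadExample = func(store storage.Store, path string, quiet bool) error {
		systemContext := common.GetSystemContext("")
		// e.g. "docker-archive:/tmp/alpine.tar" as the image source
		return images.PullImage(store, dockerArchive+path, false, quiet, systemContext)
	}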
- loadCommand = cli.Command{ - Name: "load", - Usage: "load an image from docker archive", - Description: loadDescription, - Flags: loadFlags, - Action: loadCmd, - ArgsUsage: "", - } -) - -// loadCmd gets the image/file to be loaded from the command line -// and calls loadImage to load the image to containers-storage -func loadCmd(c *cli.Context) error { - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - store, err := getStore(config) - if err != nil { - return err - } - - args := c.Args() - if len(args) > 0 { - return errors.New("too many arguments. Requires exactly 1") - } - - input := c.String("input") - quiet := c.Bool("quiet") - - if input == "/dev/stdin" { - fi, err := os.Stdin.Stat() - if err != nil { - return err - } - // checking if loading from pipe - if !fi.Mode().IsRegular() { - outFile, err := ioutil.TempFile("/var/tmp", "kpod") - if err != nil { - return errors.Errorf("error creating file %v", err) - } - defer outFile.Close() - defer os.Remove(outFile.Name()) - - inFile, err := os.OpenFile(input, 0, 0666) - if err != nil { - return errors.Errorf("error reading file %v", err) - } - defer inFile.Close() - - _, err = io.Copy(outFile, inFile) - if err != nil { - return errors.Errorf("error copying file %v", err) - } - - input = outFile.Name() - } - } - - opts := loadOptions{ - input: input, - quiet: quiet, - } - - return loadImage(store, opts) -} - -// loadImage loads the image from docker-archive or oci to containers-storage -// using the pullImage function -func loadImage(store storage.Store, opts loadOptions) error { - systemContext := common.GetSystemContext("") - - src := dockerArchive + opts.input - - return images.PullImage(store, src, false, opts.quiet, systemContext) -} diff --git a/cmd/kpod/logs.go b/cmd/kpod/logs.go deleted file mode 100644 index 0f5fd8fa..00000000 --- a/cmd/kpod/logs.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "fmt" - "time" - - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - logsFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "details", - Usage: "Show extra details provided to the logs", - Hidden: true, - }, - cli.BoolFlag{ - Name: "follow, f", - Usage: "Follow log output. The default is false", - }, - cli.StringFlag{ - Name: "since", - Usage: "Show logs since TIMESTAMP", - }, - cli.Uint64Flag{ - Name: "tail", - Usage: "Output the specified number of LINES at the end of the logs. Defaults to 0, which prints all lines", - }, - } - logsDescription = "The kpod logs command batch-retrieves whatever logs are present for a container at the time of execution. This does not guarantee execution" + - "order when combined with kpod run (i.e. 
your run may not have generated any logs at the time you execute kpod logs" - logsCommand = cli.Command{ - Name: "logs", - Usage: "Fetch the logs of a container", - Description: logsDescription, - Flags: logsFlags, - Action: logsCmd, - ArgsUsage: "CONTAINER", - } -) - -func logsCmd(c *cli.Context) error { - args := c.Args() - if len(args) != 1 { - return errors.Errorf("'kpod logs' requires exactly one container name/ID") - } - container := c.Args().First() - var opts libkpod.LogOptions - opts.Details = c.Bool("details") - opts.Follow = c.Bool("follow") - opts.SinceTime = time.Time{} - if c.IsSet("since") { - // parse time, error out if something is wrong - since, err := time.Parse("2006-01-02T15:04:05.999999999-07:00", c.String("since")) - if err != nil { - return errors.Wrapf(err, "could not parse time: %q", c.String("since")) - } - opts.SinceTime = since - } - opts.Tail = c.Uint64("tail") - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not create container server") - } - defer server.Shutdown() - err = server.Update() - if err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - logs := make(chan string) - go func() { - err = server.GetLogs(container, logs, opts) - }() - printLogs(logs) - return err -} - -func printLogs(logs chan string) { - for line := range logs { - fmt.Println(line) - } -} diff --git a/cmd/kpod/main.go b/cmd/kpod/main.go deleted file mode 100644 index 2a51aa28..00000000 --- a/cmd/kpod/main.go +++ /dev/null @@ -1,94 +0,0 @@ -package main - -import ( - "os" - - "github.com/containers/storage/pkg/reexec" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -func main() { - if reexec.Init() { - return - } - - app := cli.NewApp() - app.Name = "kpod" - app.Usage = "manage pods and images" - app.Version = "0.0.1" - - app.Commands = []cli.Command{ - diffCommand, - exportCommand, - historyCommand, - imagesCommand, - infoCommand, - inspectCommand, - loadCommand, - logsCommand, - mountCommand, - psCommand, - pullCommand, - pushCommand, - renameCommand, - rmCommand, - rmiCommand, - saveCommand, - statsCommand, - tagCommand, - umountCommand, - versionCommand, - } - app.Before = func(c *cli.Context) error { - logrus.SetLevel(logrus.ErrorLevel) - if c.GlobalBool("debug") { - logrus.SetLevel(logrus.DebugLevel) - } - return nil - } - app.After = func(*cli.Context) error { - // called by Run() when the command handler succeeds - shutdownStores() - return nil - } - cli.OsExiter = func(code int) { - // called by Run() when the command fails, bypassing After() - shutdownStores() - os.Exit(code) - } - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "config, c", - Usage: "path of a config file detailing container server configuration options", - }, - cli.BoolFlag{ - Name: "debug", - Usage: "print debugging information", - }, - cli.StringFlag{ - Name: "root", - Usage: "path to the root directory in which data, including images, is stored", - }, - cli.StringFlag{ - Name: "runroot", - Usage: "path to the 'run directory' where all state information is stored", - }, - cli.StringFlag{ - Name: "runtime", - Usage: "path to the OCI-compatible binary used to run containers, default is /usr/bin/runc", - }, - cli.StringFlag{ - Name: "storage-driver, s", - Usage: "select which storage driver is used to manage storage of images and containers (default is overlay2)", - }, - cli.StringSliceFlag{ - Name: "storage-opt", - 
Usage: "used to pass an option to the storage driver", - }, - } - if err := app.Run(os.Args); err != nil { - logrus.Errorf(err.Error()) - cli.OsExiter(1) - } -} diff --git a/cmd/kpod/mount.go b/cmd/kpod/mount.go deleted file mode 100644 index 7b46ff14..00000000 --- a/cmd/kpod/mount.go +++ /dev/null @@ -1,118 +0,0 @@ -package main - -import ( - js "encoding/json" - "fmt" - - of "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - mountDescription = ` - kpod mount - Lists all mounted containers mount points - - kpod mount CONTAINER-NAME-OR-ID - Mounts the specified container and outputs the mountpoint -` - - mountFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "notruncate", - Usage: "do not truncate output", - }, - cli.StringFlag{ - Name: "label", - Usage: "SELinux label for the mount point", - }, - cli.StringFlag{ - Name: "format", - Usage: "Change the output format to Go template", - }, - } - mountCommand = cli.Command{ - Name: "mount", - Usage: "Mount a working container's root filesystem", - Description: mountDescription, - Action: mountCmd, - ArgsUsage: "[CONTAINER-NAME-OR-ID]", - Flags: mountFlags, - } -) - -// MountOutputParams stores info about each layer -type jsonMountPoint struct { - ID string `json:"id"` - Names []string `json:"names"` - MountPoint string `json:"mountpoint"` -} - -func mountCmd(c *cli.Context) error { - formats := map[string]bool{ - "": true, - of.JSONString: true, - } - - args := c.Args() - json := c.String("format") == of.JSONString - if !formats[c.String("format")] { - return errors.Errorf("%q is not a supported format", c.String("format")) - } - - if len(args) > 1 { - return errors.Errorf("too many arguments specified") - } - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - store, err := getStore(config) - if err != nil { - return errors.Wrapf(err, "error getting store") - } - if len(args) == 1 { - if json { - return errors.Wrapf(err, "json option can not be used with a container id") - } - mountPoint, err := store.Mount(args[0], c.String("label")) - if err != nil { - return errors.Wrapf(err, "error finding container %q", args[0]) - } - fmt.Printf("%s\n", mountPoint) - } else { - jsonMountPoints := []jsonMountPoint{} - containers, err2 := store.Containers() - if err2 != nil { - return errors.Wrapf(err2, "error reading list of all containers") - } - for _, container := range containers { - layer, err := store.Layer(container.LayerID) - if err != nil { - return errors.Wrapf(err, "error finding layer %q for container %q", container.LayerID, container.ID) - } - if layer.MountPoint == "" { - continue - } - if json { - jsonMountPoints = append(jsonMountPoints, jsonMountPoint{ID: container.ID, Names: container.Names, MountPoint: layer.MountPoint}) - continue - } - - if c.Bool("notruncate") { - fmt.Printf("%-64s %s\n", container.ID, layer.MountPoint) - } else { - fmt.Printf("%-12.12s %s\n", container.ID, layer.MountPoint) - } - } - if json { - data, err := js.MarshalIndent(jsonMountPoints, "", " ") - if err != nil { - return err - } - fmt.Printf("%s\n", data) - } - } - return nil -} diff --git a/cmd/kpod/ps.go b/cmd/kpod/ps.go deleted file mode 100644 index b28a4aa6..00000000 --- a/cmd/kpod/ps.go +++ /dev/null @@ -1,582 +0,0 @@ -package main - -import ( - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/docker/go-units" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" - - 
"k8s.io/apimachinery/pkg/fields" - - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/oci" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -type psOptions struct { - all bool - filter string - format string - last int - latest bool - noTrunc bool - quiet bool - size bool - label string -} - -type psTemplateParams struct { - ID string - Image string - Command string - CreatedAt string - RunningFor string - Status string - Ports string - Size string - Names string - Labels string - Mounts string -} - -// psJSONParams is only used when the JSON format is specified, -// and is better for data processing from JSON. -// psJSONParams will be populated by data from libkpod.ContainerData, -// the members of the struct are the sama data types as their sources. -type psJSONParams struct { - ID string `json:"id"` - Image string `json:"image"` - ImageID string `json:"image_id"` - Command string `json:"command"` - CreatedAt time.Time `json:"createdAt"` - RunningFor time.Duration `json:"runningFor"` - Status string `json:"status"` - Ports map[string]struct{} `json:"ports"` - Size uint `json:"size"` - Names string `json:"names"` - Labels fields.Set `json:"labels"` - Mounts []specs.Mount `json:"mounts"` - ContainerRunning bool `json:"ctrRunning"` -} - -const runningState = "running" - -var ( - psFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "all, a", - Usage: "Show all the containers, default is only running containers", - }, - cli.StringFlag{ - Name: "filter, f", - Usage: "Filter output based on conditions given", - }, - cli.StringFlag{ - Name: "format", - Usage: "Pretty-print containers to JSON or using a Go template", - }, - cli.IntFlag{ - Name: "last, n", - Usage: "Print the n last created containers (all states)", - Value: -1, - }, - cli.BoolFlag{ - Name: "latest, l", - Usage: "Show the latest container created (all states)", - }, - cli.BoolFlag{ - Name: "no-trunc", - Usage: "Display the extended information", - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "Print the numeric IDs of the containers only", - }, - cli.BoolFlag{ - Name: "size, s", - Usage: "Display the total file sizes", - }, - } - psDescription = "Prints out information about the containers" - psCommand = cli.Command{ - Name: "ps", - Usage: "List containers", - Description: psDescription, - Flags: psFlags, - Action: psCmd, - ArgsUsage: "", - } -) - -func psCmd(c *cli.Context) error { - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "error creating server") - } - if err := server.Update(); err != nil { - return errors.Wrapf(err, "error updating list of containers") - } - - if len(c.Args()) > 0 { - return errors.Errorf("too many arguments, ps takes no arguments") - } - - format := genPsFormat(c.Bool("quiet"), c.Bool("size")) - if c.IsSet("format") { - format = c.String("format") - } - - opts := psOptions{ - all: c.Bool("all"), - filter: c.String("filter"), - format: format, - last: c.Int("last"), - latest: c.Bool("latest"), - noTrunc: c.Bool("no-trunc"), - quiet: c.Bool("quiet"), - size: c.Bool("size"), - } - - // all, latest, and last are mutually exclusive. 
Only one flag can be used at a time - exclusiveOpts := 0 - if opts.last >= 0 { - exclusiveOpts++ - } - if opts.latest { - exclusiveOpts++ - } - if opts.all { - exclusiveOpts++ - } - if exclusiveOpts > 1 { - return errors.Errorf("Last, latest and all are mutually exclusive") - } - - containers, err := server.ListContainers() - if err != nil { - return errors.Wrapf(err, "error getting containers from server") - } - var params *FilterParamsPS - if opts.filter != "" { - params, err = parseFilter(opts.filter, containers) - if err != nil { - return errors.Wrapf(err, "error parsing filter") - } - } else { - params = nil - } - - containerList := getContainersMatchingFilter(containers, params, server) - - return generatePsOutput(containerList, server, opts) -} - -// generate the template based on conditions given -func genPsFormat(quiet, size bool) (format string) { - if quiet { - return formats.IDString - } - format = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}\t{{.Ports}}\t{{.Names}}\t" - if size { - format += "{{.Size}}\t" - } - return -} - -func psToGeneric(templParams []psTemplateParams, JSONParams []psJSONParams) (genericParams []interface{}) { - if len(templParams) > 0 { - for _, v := range templParams { - genericParams = append(genericParams, interface{}(v)) - } - return - } - for _, v := range JSONParams { - genericParams = append(genericParams, interface{}(v)) - } - return -} - -// generate the accurate header based on template given -func (p *psTemplateParams) headerMap() map[string]string { - v := reflect.Indirect(reflect.ValueOf(p)) - values := make(map[string]string) - - for i := 0; i < v.NumField(); i++ { - key := v.Type().Field(i).Name - value := key - if value == "ID" { - value = "Container" + value - } - values[key] = strings.ToUpper(splitCamelCase(value)) - } - return values -} - -// getContainers gets the containers that match the flags given -func getContainers(containers []*libkpod.ContainerData, opts psOptions) []*libkpod.ContainerData { - var containersOutput []*libkpod.ContainerData - if opts.last >= 0 && opts.last < len(containers) { - for i := 0; i < opts.last; i++ { - containersOutput = append(containersOutput, containers[i]) - } - return containersOutput - } - if opts.latest { - return []*libkpod.ContainerData{containers[0]} - } - if opts.all || opts.last >= len(containers) { - return containers - } - for _, ctr := range containers { - if ctr.State.Status == runningState { - containersOutput = append(containersOutput, ctr) - } - } - return containersOutput -} - -// getTemplateOutput returns the modified container information -func getTemplateOutput(containers []*libkpod.ContainerData, opts psOptions) (psOutput []psTemplateParams) { - var status string - for _, ctr := range containers { - ctrID := ctr.ID - runningFor := units.HumanDuration(time.Since(ctr.State.Created)) - createdAt := runningFor + " ago" - command := getCommand(ctr.ImageCreatedBy) - imageName := ctr.FromImage - mounts := getMounts(ctr.Mounts, opts.noTrunc) - ports := getPorts(ctr.Config.ExposedPorts) - size := units.HumanSize(float64(ctr.SizeRootFs)) - labels := getLabels(ctr.Labels) - - switch ctr.State.Status { - case "stopped": - status = "Exited (" + strconv.FormatInt(int64(ctr.State.ExitCode), 10) + ") " + runningFor + " ago" - case runningState: - status = "Up " + runningFor + " ago" - default: - status = "Created" - } - - if !opts.noTrunc { - ctrID = ctr.ID[:idTruncLength] - imageName = getImageName(ctr.FromImage) - } - - params := psTemplateParams{ - ID: ctrID, - Image: 
imageName, - Command: command, - CreatedAt: createdAt, - RunningFor: runningFor, - Status: status, - Ports: ports, - Size: size, - Names: ctr.Name, - Labels: labels, - Mounts: mounts, - } - psOutput = append(psOutput, params) - } - return -} - -// getJSONOutput returns the container info in its raw form -func getJSONOutput(containers []*libkpod.ContainerData) (psOutput []psJSONParams) { - for _, ctr := range containers { - params := psJSONParams{ - ID: ctr.ID, - Image: ctr.FromImage, - ImageID: ctr.FromImageID, - Command: getCommand(ctr.ImageCreatedBy), - CreatedAt: ctr.State.Created, - RunningFor: time.Since(ctr.State.Created), - Status: ctr.State.Status, - Ports: ctr.Config.ExposedPorts, - Size: ctr.SizeRootFs, - Names: ctr.Name, - Labels: ctr.Labels, - Mounts: ctr.Mounts, - ContainerRunning: ctr.State.Status == runningState, - } - psOutput = append(psOutput, params) - } - return -} - -func generatePsOutput(containers []*libkpod.ContainerData, server *libkpod.ContainerServer, opts psOptions) error { - containersOutput := getContainers(containers, opts) - if len(containersOutput) == 0 { - return nil - } - - var out formats.Writer - - switch opts.format { - case formats.JSONString: - psOutput := getJSONOutput(containersOutput) - out = formats.JSONStructArray{Output: psToGeneric([]psTemplateParams{}, psOutput)} - default: - psOutput := getTemplateOutput(containersOutput, opts) - out = formats.StdoutTemplateArray{Output: psToGeneric(psOutput, []psJSONParams{}), Template: opts.format, Fields: psOutput[0].headerMap()} - } - - return formats.Writer(out).Out() -} - -// getCommand gets the actual command from the whole command -func getCommand(cmd string) string { - reg, err := regexp.Compile(".*\\[|\\].*") - if err != nil { - return "" - } - arr := strings.Split(reg.ReplaceAllLiteralString(cmd, ""), ",") - return strings.Join(arr, ",") -} - -// getImageName shortens the image name -func getImageName(img string) string { - arr := strings.Split(img, "/") - if arr[0] == "docker.io" && arr[1] == "library" { - img = strings.Join(arr[2:], "/") - } else if arr[0] == "docker.io" { - img = strings.Join(arr[1:], "/") - } - return img -} - -// getLabels converts the labels to a string of the form "key=value, key2=value2" -func getLabels(labels fields.Set) string { - var arr []string - if len(labels) > 0 { - for key, val := range labels { - temp := key + "=" + val - arr = append(arr, temp) - } - return strings.Join(arr, ",") - } - return "" -} - -// getMounts converts the volumes mounted to a string of the form "mount1, mount2" -// it truncates it if noTrunc is false -func getMounts(mounts []specs.Mount, noTrunc bool) string { - var arr []string - if len(mounts) == 0 { - return "" - } - for _, mount := range mounts { - if noTrunc { - arr = append(arr, mount.Source) - continue - } - tempArr := strings.SplitAfter(mount.Source, "/") - if len(tempArr) >= 3 { - arr = append(arr, strings.Join(tempArr[:3], "")) - } else { - arr = append(arr, mount.Source) - } - } - return strings.Join(arr, ",") -} - -// getPorts converts the ports used to a string of the from "port1, port2" -func getPorts(ports map[string]struct{}) string { - var arr []string - if len(ports) == 0 { - return "" - } - for key := range ports { - arr = append(arr, key) - } - return strings.Join(arr, ",") -} - -// FilterParamsPS contains the filter options for ps -type FilterParamsPS struct { - id string - label string - name string - exited int32 - status string - ancestor string - before time.Time - since time.Time - volume string -} - -// 
parseFilter takes a filter string and a list of containers and filters it -func parseFilter(filter string, containers []*oci.Container) (*FilterParamsPS, error) { - params := new(FilterParamsPS) - allFilters := strings.Split(filter, ",") - - for _, param := range allFilters { - pair := strings.SplitN(param, "=", 2) - switch strings.TrimSpace(pair[0]) { - case "id": - params.id = pair[1] - case "label": - params.label = pair[1] - case "name": - params.name = pair[1] - case "exited": - exitedCode, err := strconv.ParseInt(pair[1], 10, 32) - if err != nil { - return nil, errors.Errorf("exited code out of range %q", pair[1]) - } - params.exited = int32(exitedCode) - case "status": - params.status = pair[1] - case "ancestor": - params.ancestor = pair[1] - case "before": - if ctr, err := findContainer(containers, pair[1]); err == nil { - params.before = ctr.CreatedAt() - } else { - return nil, errors.Wrapf(err, "no such container %q", pair[1]) - } - case "since": - if ctr, err := findContainer(containers, pair[1]); err == nil { - params.before = ctr.CreatedAt() - } else { - return nil, errors.Wrapf(err, "no such container %q", pair[1]) - } - case "volume": - params.volume = pair[1] - default: - return nil, errors.Errorf("invalid filter %q", pair[0]) - } - } - return params, nil -} - -// findContainer finds a container with a specific name or id from a list of containers -func findContainer(containers []*oci.Container, ref string) (*oci.Container, error) { - for _, ctr := range containers { - if strings.HasPrefix(ctr.ID(), ref) || ctr.Name() == ref { - return ctr, nil - } - } - return nil, errors.Errorf("could not find container") -} - -// matchesFilter checks if a container matches all the filter parameters -func matchesFilter(ctrData *libkpod.ContainerData, params *FilterParamsPS) bool { - if params == nil { - return true - } - if params.id != "" && !matchesID(ctrData, params.id) { - return false - } - if params.name != "" && !matchesName(ctrData, params.name) { - return false - } - if !params.before.IsZero() && !matchesBeforeContainer(ctrData, params.before) { - return false - } - if !params.since.IsZero() && !matchesSinceContainer(ctrData, params.since) { - return false - } - if params.exited > 0 && !matchesExited(ctrData, params.exited) { - return false - } - if params.status != "" && !matchesStatus(ctrData, params.status) { - return false - } - if params.ancestor != "" && !matchesAncestor(ctrData, params.ancestor) { - return false - } - if params.label != "" && !matchesLabel(ctrData, params.label) { - return false - } - if params.volume != "" && !matchesVolume(ctrData, params.volume) { - return false - } - return true -} - -// GetContainersMatchingFilter returns a slice of all the containers that match the provided filter parameters -func getContainersMatchingFilter(containers []*oci.Container, filter *FilterParamsPS, server *libkpod.ContainerServer) []*libkpod.ContainerData { - var filteredCtrs []*libkpod.ContainerData - for _, ctr := range containers { - ctrData, err := server.GetContainerData(ctr.ID(), true) - if err != nil { - logrus.Warn("unable to get container data for matched container") - } - if filter == nil || matchesFilter(ctrData, filter) { - filteredCtrs = append(filteredCtrs, ctrData) - } - } - return filteredCtrs -} - -// matchesID returns true if the id's match -func matchesID(ctrData *libkpod.ContainerData, id string) bool { - return strings.HasPrefix(ctrData.ID, id) -} - -// matchesBeforeContainer returns true if the container was created before the filter image -func 
matchesBeforeContainer(ctrData *libkpod.ContainerData, beforeTime time.Time) bool { - return ctrData.State.Created.Before(beforeTime) -} - -// matchesSincecontainer returns true if the container was created since the filter image -func matchesSinceContainer(ctrData *libkpod.ContainerData, sinceTime time.Time) bool { - return ctrData.State.Created.After(sinceTime) -} - -// matchesLabel returns true if the container label matches that of the filter label -func matchesLabel(ctrData *libkpod.ContainerData, label string) bool { - pair := strings.SplitN(label, "=", 2) - if val, ok := ctrData.Labels[pair[0]]; ok { - if len(pair) == 2 && val == pair[1] { - return true - } - if len(pair) == 1 { - return true - } - return false - } - return false -} - -// matchesName returns true if the names are identical -func matchesName(ctrData *libkpod.ContainerData, name string) bool { - return ctrData.Name == name -} - -// matchesExited returns true if the exit codes are identical -func matchesExited(ctrData *libkpod.ContainerData, exited int32) bool { - return ctrData.State.ExitCode == exited -} - -// matchesStatus returns true if the container status matches that of filter status -func matchesStatus(ctrData *libkpod.ContainerData, status string) bool { - return ctrData.State.Status == status -} - -// matchesAncestor returns true if filter ancestor is in container image name -func matchesAncestor(ctrData *libkpod.ContainerData, ancestor string) bool { - return strings.Contains(ctrData.FromImage, ancestor) -} - -// matchesVolue returns true if the volume mounted or path to volue of the container matches that of filter volume -func matchesVolume(ctrData *libkpod.ContainerData, volume string) bool { - for _, vol := range ctrData.Mounts { - if strings.Contains(vol.Source, volume) { - return true - } - } - return false -} diff --git a/cmd/kpod/pull.go b/cmd/kpod/pull.go deleted file mode 100644 index 69f31def..00000000 --- a/cmd/kpod/pull.go +++ /dev/null @@ -1,56 +0,0 @@ -package main - -import ( - "os" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -var ( - pullFlags = []cli.Flag{ - cli.BoolFlag{ - // all-tags is hidden since it has not been implemented yet - Name: "all-tags, a", - Hidden: true, - Usage: "Download all tagged images in the repository", - }, - } - - pullDescription = "Pulls an image from a registry and stores it locally.\n" + - "An image can be pulled using its tag or digest. If a tag is not\n" + - "specified, the image with the 'latest' tag (if it exists) is pulled." - pullCommand = cli.Command{ - Name: "pull", - Usage: "pull an image from a registry", - Description: pullDescription, - Flags: pullFlags, - Action: pullCmd, - ArgsUsage: "", - } -) - -// pullCmd gets the data from the command line and calls pullImage -// to copy an image from a registry to a local machine -func pullCmd(c *cli.Context) error { - args := c.Args() - if len(args) == 0 { - logrus.Errorf("an image name must be specified") - return nil - } - if len(args) > 1 { - logrus.Errorf("too many arguments. 
Requires exactly 1") - return nil - } - image := args[0] - - runtime, err := getRuntime(c) - if err != nil { - return errors.Wrapf(err, "could not create runtime") - } - if err := runtime.PullImage(image, c.Bool("all-tags"), os.Stdout); err != nil { - return errors.Errorf("error pulling image from %q: %v", image, err) - } - return nil -} diff --git a/cmd/kpod/push.go b/cmd/kpod/push.go deleted file mode 100644 index be9d3a1e..00000000 --- a/cmd/kpod/push.go +++ /dev/null @@ -1,124 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/containers/image/types" - "github.com/containers/storage/pkg/archive" - "github.com/kubernetes-incubator/cri-o/libpod/common" - "github.com/kubernetes-incubator/cri-o/libpod/images" - "github.com/pkg/errors" - "github.com/urfave/cli" - "golang.org/x/crypto/ssh/terminal" -) - -var ( - pushFlags = []cli.Flag{ - cli.StringFlag{ - Name: "signature-policy", - Usage: "`pathname` of signature policy file (not usually used)", - Hidden: true, - }, - cli.StringFlag{ - Name: "creds", - Usage: "`credentials` (USERNAME:PASSWORD) to use for authenticating to a registry", - }, - cli.StringFlag{ - Name: "cert-dir", - Usage: "`pathname` of a directory containing TLS certificates and keys", - }, - cli.BoolTFlag{ - Name: "tls-verify", - Usage: "require HTTPS and verify certificates when contacting registries (default: true)", - }, - cli.BoolFlag{ - Name: "remove-signatures", - Usage: "discard any pre-existing signatures in the image", - }, - cli.StringFlag{ - Name: "sign-by", - Usage: "add a signature at the destination using the specified key", - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "don't output progress information when pushing images", - }, - } - pushDescription = fmt.Sprintf(` - Pushes an image to a specified location. - The Image "DESTINATION" uses a "transport":"details" format. 
- See kpod-push(1) section "DESTINATION" for the expected format`) - - pushCommand = cli.Command{ - Name: "push", - Usage: "push an image to a specified destination", - Description: pushDescription, - Flags: pushFlags, - Action: pushCmd, - ArgsUsage: "IMAGE DESTINATION", - } -) - -func pushCmd(c *cli.Context) error { - var registryCreds *types.DockerAuthConfig - - args := c.Args() - if len(args) < 2 { - return errors.New("kpod push requires exactly 2 arguments") - } - srcName := c.Args().Get(0) - destName := c.Args().Get(1) - - signaturePolicy := c.String("signature-policy") - registryCredsString := c.String("creds") - certPath := c.String("cert-dir") - skipVerify := !c.BoolT("tls-verify") - removeSignatures := c.Bool("remove-signatures") - signBy := c.String("sign-by") - - if registryCredsString != "" { - creds, err := common.ParseRegistryCreds(registryCredsString) - if err != nil { - if err == common.ErrNoPassword { - fmt.Print("Password: ") - password, err := terminal.ReadPassword(0) - if err != nil { - return errors.Wrapf(err, "could not read password from terminal") - } - creds.Password = string(password) - } else { - return err - } - } - registryCreds = creds - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - store, err := getStore(config) - if err != nil { - return err - } - - options := images.CopyOptions{ - Compression: archive.Uncompressed, - SignaturePolicyPath: signaturePolicy, - Store: store, - DockerRegistryOptions: common.DockerRegistryOptions{ - DockerRegistryCreds: registryCreds, - DockerCertPath: certPath, - DockerInsecureSkipTLSVerify: skipVerify, - }, - SigningOptions: common.SigningOptions{ - RemoveSignatures: removeSignatures, - SignBy: signBy, - }, - } - if !c.Bool("quiet") { - options.ReportWriter = os.Stderr - } - return images.PushImage(srcName, destName, options) -} diff --git a/cmd/kpod/rename.go b/cmd/kpod/rename.go deleted file mode 100644 index fe1f08d9..00000000 --- a/cmd/kpod/rename.go +++ /dev/null @@ -1,46 +0,0 @@ -package main - -import ( - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - renameDescription = "Rename a container. 
Container may be created, running, paused, or stopped" - renameFlags = []cli.Flag{} - renameCommand = cli.Command{ - Name: "rename", - Usage: "rename a container", - Description: renameDescription, - Action: renameCmd, - ArgsUsage: "CONTAINER NEW-NAME", - Flags: renameFlags, - } -) - -func renameCmd(c *cli.Context) error { - if len(c.Args()) != 2 { - return errors.Errorf("Rename requires a src container name/ID and a dest container name") - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not get container server") - } - defer server.Shutdown() - err = server.Update() - if err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - - err = server.ContainerRename(c.Args().Get(0), c.Args().Get(1)) - if err != nil { - return errors.Wrapf(err, "could not rename container") - } - return nil -} diff --git a/cmd/kpod/rm.go b/cmd/kpod/rm.go deleted file mode 100644 index bed72642..00000000 --- a/cmd/kpod/rm.go +++ /dev/null @@ -1,65 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - rmFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "force, f", - Usage: "Force removal of a running container. The default is false", - }, - } - rmDescription = "Remove one or more containers" - rmCommand = cli.Command{ - Name: "rm", - Usage: fmt.Sprintf(`kpod rm will remove one or more containers from the host. The container name or ID can be used. - This does not remove images. Running containers will not be removed without the -f option.`), - Description: rmDescription, - Flags: rmFlags, - Action: rmCmd, - ArgsUsage: "", - } -) - -// saveCmd saves the image to either docker-archive or oci -func rmCmd(c *cli.Context) error { - args := c.Args() - if len(args) == 0 { - return errors.Errorf("specify one or more containers to remove") - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not get container server") - } - defer server.Shutdown() - err = server.Update() - if err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - force := c.Bool("force") - - for _, container := range c.Args() { - id, err2 := server.Remove(container, force) - if err2 != nil { - if err == nil { - err = err2 - } else { - err = errors.Wrapf(err, "%v. Stop the container before attempting removal or use -f\n", err2) - } - } else { - fmt.Println(id) - } - } - return err -} diff --git a/cmd/kpod/rmi.go b/cmd/kpod/rmi.go deleted file mode 100644 index 2e83ee8b..00000000 --- a/cmd/kpod/rmi.go +++ /dev/null @@ -1,123 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/libpod/images" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - rmiDescription = "removes one or more locally stored images." 
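	// Editorial sketch, not part of the original diff: the untag-versus-delete
	// rule that rmiCmd below enforces. An image ID referenced by several tags
	// is only deleted when forced; a tag reference merely drops that one name.
	// The name rmiUntagExample is hypothetical, and the running-container
	// check the real code performs is omitted here.
	rmiUntagExample = func(store storage.Store, image *storage.Image, ref string, force bool) error {
		if images.MatchesID(image.ID, ref) && len(image.Names) > 1 && !force {
			// An ID shared by several tags is never deleted implicitly.
			return fmt.Errorf("unable to delete %s (must force) - image is referred to in multiple tags", image.ID)
		}
		if !images.MatchesID(image.ID, ref) {
			// A tag reference only removes that one name.
			_, err := images.UntagImage(store, image, ref)
			return err
		}
		image.Names = image.Names[:0] // forced: drop every tag before removal
		return nil
	}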
- rmiFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "force, f", - Usage: "force removal of the image", - }, - } - rmiCommand = cli.Command{ - Name: "rmi", - Usage: "removes one or more images from local storage", - Description: rmiDescription, - Action: rmiCmd, - ArgsUsage: "IMAGE-NAME-OR-ID [...]", - Flags: rmiFlags, - } -) - -func rmiCmd(c *cli.Context) error { - - force := false - if c.IsSet("force") { - force = c.Bool("force") - } - - args := c.Args() - if len(args) == 0 { - return errors.Errorf("image name or ID must be specified") - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - store, err := getStore(config) - if err != nil { - return err - } - - for _, id := range args { - image, err := images.FindImage(store, id) - if err != nil { - return errors.Wrapf(err, "could not get image %q", id) - } - if image != nil { - ctrIDs, err := runningContainers(image, store) - if err != nil { - return errors.Wrapf(err, "error getting running containers for image %q", id) - } - if len(ctrIDs) > 0 && len(image.Names) <= 1 { - if force { - removeContainers(ctrIDs, store) - } else { - for ctrID := range ctrIDs { - return fmt.Errorf("Could not remove image %q (must force) - container %q is using its reference image", id, ctrID) - } - } - } - // If the user supplied an ID, we cannot delete the image if it is referred to by multiple tags - if images.MatchesID(image.ID, id) { - if len(image.Names) > 1 && !force { - return fmt.Errorf("unable to delete %s (must force) - image is referred to in multiple tags", image.ID) - } - // If it is forced, we have to untag the image so that it can be deleted - image.Names = image.Names[:0] - } else { - name, err2 := images.UntagImage(store, image, id) - if err2 != nil { - return err - } - fmt.Printf("untagged: %s", name) - } - - if len(image.Names) > 0 { - continue - } - id, err := images.RemoveImage(image, store) - if err != nil { - return err - } - fmt.Printf("%s\n", id) - } - } - - return nil -} - -// Returns a list of running containers associated with the given ImageReference -// TODO: replace this with something in libkpod -func runningContainers(image *storage.Image, store storage.Store) ([]string, error) { - ctrIDs := []string{} - containers, err := store.Containers() - if err != nil { - return nil, err - } - for _, ctr := range containers { - if ctr.ImageID == image.ID { - ctrIDs = append(ctrIDs, ctr.ID) - } - } - return ctrIDs, nil -} - -// TODO: replace this with something in libkpod -func removeContainers(ctrIDs []string, store storage.Store) error { - for _, ctrID := range ctrIDs { - if err := store.DeleteContainer(ctrID); err != nil { - return errors.Wrapf(err, "could not remove container %q", ctrID) - } - } - return nil -} diff --git a/cmd/kpod/save.go b/cmd/kpod/save.go deleted file mode 100644 index 97d42f70..00000000 --- a/cmd/kpod/save.go +++ /dev/null @@ -1,100 +0,0 @@ -package main - -import ( - "os" - - "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/libpod/images" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -const ( - dockerArchive = "docker-archive:" -) - -type saveOptions struct { - output string - quiet bool - images []string -} - -var ( - saveFlags = []cli.Flag{ - cli.StringFlag{ - Name: "output, o", - Usage: "Write to a file, default is STDOUT", - Value: "/dev/stdout", - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "Suppress the output", - }, - } - saveDescription = "Save an image to docker-archive on the local 
machine" - saveCommand = cli.Command{ - Name: "save", - Usage: "Save image to an archive", - Description: saveDescription, - Flags: saveFlags, - Action: saveCmd, - ArgsUsage: "", - } -) - -// saveCmd saves the image to either docker-archive or oci -func saveCmd(c *cli.Context) error { - args := c.Args() - if len(args) == 0 { - return errors.Errorf("need at least 1 argument") - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - store, err := getStore(config) - if err != nil { - return err - } - - output := c.String("output") - quiet := c.Bool("quiet") - - if output == "/dev/stdout" { - fi := os.Stdout - if logrus.IsTerminal(fi) { - return errors.Errorf("refusing to save to terminal. Use -o flag or redirect") - } - } - - opts := saveOptions{ - output: output, - quiet: quiet, - images: args, - } - - return saveImage(store, opts) -} - -// saveImage pushes the image to docker-archive or oci by -// calling pushImage -func saveImage(store storage.Store, opts saveOptions) error { - dst := dockerArchive + opts.output - - pushOpts := images.CopyOptions{ - SignaturePolicyPath: "", - Store: store, - } - - // only one image is supported for now - // future pull requests will fix this - for _, image := range opts.images { - dest := dst + ":" + image - if err := images.PushImage(image, dest, pushOpts); err != nil { - return errors.Wrapf(err, "unable to save %q", image) - } - } - return nil -} diff --git a/cmd/kpod/stats.go b/cmd/kpod/stats.go deleted file mode 100644 index b3ea6879..00000000 --- a/cmd/kpod/stats.go +++ /dev/null @@ -1,241 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - "strings" - "text/template" - "time" - - tm "github.com/buger/goterm" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/libpod/images" - "github.com/kubernetes-incubator/cri-o/oci" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var printf func(format string, a ...interface{}) (n int, err error) -var println func(a ...interface{}) (n int, err error) - -type statsOutputParams struct { - Container string - ID string - CPUPerc string - MemUsage string - MemPerc string - NetIO string - BlockIO string - PIDs uint64 -} - -var ( - statsFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "all, a", - Usage: "show all containers. Only running containers are shown by default. 
The default is false", - }, - cli.BoolFlag{ - Name: "no-stream", - Usage: "disable streaming stats and only pull the first result, default setting is false", - }, - cli.StringFlag{ - Name: "format", - Usage: "pretty-print container statistics using a Go template", - }, - cli.BoolFlag{ - Name: "json", - Usage: "output container statistics in json format", - }, - } - - statsDescription = "display a live stream of one or more containers' resource usage statistics" - statsCommand = cli.Command{ - Name: "stats", - Usage: "Display percentage of CPU, memory, network I/O, block I/O and PIDs for one or more containers", - Description: statsDescription, - Flags: statsFlags, - Action: statsCmd, - ArgsUsage: "", - } -) - -func statsCmd(c *cli.Context) error { - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not read config") - } - containerServer, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not create container server") - } - defer containerServer.Shutdown() - err = containerServer.Update() - if err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - times := -1 - if c.Bool("no-stream") { - times = 1 - } - statsChan := make(chan []*libkpod.ContainerStats) - // iterate over the channel until it is closed - go func() { - // print using goterm - printf = tm.Printf - println = tm.Println - for stats := range statsChan { - // Continually refresh statistics - tm.Clear() - tm.MoveCursor(1, 1) - outputStats(stats, c.String("format"), c.Bool("json")) - tm.Flush() - time.Sleep(time.Second) - } - }() - return getStats(containerServer, c.Args(), c.Bool("all"), statsChan, times) -} - -func getStats(server *libkpod.ContainerServer, args []string, all bool, statsChan chan []*libkpod.ContainerStats, times int) error { - ctrs, err := server.ListContainers(isRunning, ctrInList(args)) - if err != nil { - return err - } - containerStats := map[string]*libkpod.ContainerStats{} - for _, ctr := range ctrs { - initialStats, err := server.GetContainerStats(ctr, &libkpod.ContainerStats{}) - if err != nil { - return err - } - containerStats[ctr.ID()] = initialStats - } - step := 1 - if times == -1 { - times = 1 - step = 0 - } - for i := 0; i < times; i += step { - reportStats := []*libkpod.ContainerStats{} - for _, ctr := range ctrs { - id := ctr.ID() - if _, ok := containerStats[ctr.ID()]; !ok { - initialStats, err := server.GetContainerStats(ctr, &libkpod.ContainerStats{}) - if err != nil { - return err - } - containerStats[id] = initialStats - } - stats, err := server.GetContainerStats(ctr, containerStats[id]) - if err != nil { - return err - } - // replace the previous measurement with the current one - containerStats[id] = stats - reportStats = append(reportStats, stats) - } - statsChan <- reportStats - - err := server.Update() - if err != nil { - return err - } - ctrs, err = server.ListContainers(isRunning, ctrInList(args)) - if err != nil { - return err - } - } - return nil -} - -func outputStats(stats []*libkpod.ContainerStats, format string, json bool) error { - if format == "" { - outputStatsHeader() - } - if json { - return outputStatsAsJSON(stats) - } - var err error - for _, s := range stats { - if format == "" { - outputStatsUsingFormatString(s) - } else { - params := getStatsOutputParams(s) - err2 := outputStatsUsingTemplate(format, params) - if err2 != nil { - err = errors.Wrapf(err, err2.Error()) - } - } - } - return err -} - -func outputStatsHeader() { - printf("%-64s %-16s %-32s %-16s %-24s %-24s %s\n", 
"CONTAINER", "CPU %", "MEM USAGE / MEM LIMIT", "MEM %", "NET I/O", "BLOCK I/O", "PIDS") -} - -func outputStatsUsingFormatString(stats *libkpod.ContainerStats) { - printf("%-64s %-16s %-32s %-16s %-24s %-24s %d\n", stats.Container, floatToPercentString(stats.CPU), combineHumanValues(stats.MemUsage, stats.MemLimit), floatToPercentString(stats.MemPerc), combineHumanValues(stats.NetInput, stats.NetOutput), combineHumanValues(stats.BlockInput, stats.BlockOutput), stats.PIDs) -} - -func combineHumanValues(a, b uint64) string { - return fmt.Sprintf("%s / %s", images.FormattedSize(float64(a)), images.FormattedSize(float64(b))) -} - -func floatToPercentString(f float64) string { - return fmt.Sprintf("%.2f %s", f, "%") -} - -func getStatsOutputParams(stats *libkpod.ContainerStats) statsOutputParams { - return statsOutputParams{ - Container: stats.Container, - ID: stats.Container, - CPUPerc: floatToPercentString(stats.CPU), - MemUsage: combineHumanValues(stats.MemUsage, stats.MemLimit), - MemPerc: floatToPercentString(stats.MemPerc), - NetIO: combineHumanValues(stats.NetInput, stats.NetOutput), - BlockIO: combineHumanValues(stats.BlockInput, stats.BlockOutput), - PIDs: stats.PIDs, - } -} - -func outputStatsUsingTemplate(format string, params statsOutputParams) error { - tmpl, err := template.New("stats").Parse(format) - if err != nil { - return errors.Wrapf(err, "template parsing error") - } - - err = tmpl.Execute(os.Stdout, params) - if err != nil { - return err - } - println() - return nil -} - -func outputStatsAsJSON(stats []*libkpod.ContainerStats) error { - s, err := json.Marshal(stats) - if err != nil { - return err - } - println(s) - return nil -} - -func isRunning(ctr *oci.Container) bool { - return ctr.State().Status == "running" -} - -func ctrInList(idsOrNames []string) func(ctr *oci.Container) bool { - if len(idsOrNames) == 0 { - return func(*oci.Container) bool { return true } - } - return func(ctr *oci.Container) bool { - for _, idOrName := range idsOrNames { - if strings.HasPrefix(ctr.ID(), idOrName) || strings.HasSuffix(ctr.Name(), idOrName) { - return true - } - } - return false - } -} diff --git a/cmd/kpod/tag.go b/cmd/kpod/tag.go deleted file mode 100644 index 4e92c310..00000000 --- a/cmd/kpod/tag.go +++ /dev/null @@ -1,78 +0,0 @@ -package main - -import ( - "github.com/containers/image/docker/reference" - "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/libpod/images" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - tagDescription = "Adds one or more additional names to locally-stored image" - tagCommand = cli.Command{ - Name: "tag", - Usage: "Add an additional name to a local image", - Description: tagDescription, - Action: tagCmd, - ArgsUsage: "IMAGE-NAME [IMAGE-NAME ...]", - } -) - -func tagCmd(c *cli.Context) error { - args := c.Args() - if len(args) < 2 { - return errors.Errorf("image name and at least one new name must be specified") - } - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - store, err := getStore(config) - if err != nil { - return err - } - img, err := images.FindImage(store, args[0]) - if err != nil { - return err - } - if img == nil { - return errors.New("null image") - } - err = addImageNames(store, img, args[1:]) - if err != nil { - return errors.Wrapf(err, "error adding names %v to image %q", args[1:], args[0]) - } - return nil -} - -func addImageNames(store storage.Store, image *storage.Image, addNames []string) error { - // Add tags to the names if applicable - 
names, err := expandedTags(addNames) - if err != nil { - return err - } - err = store.SetNames(image.ID, append(image.Names, names...)) - if err != nil { - return errors.Wrapf(err, "error adding names (%v) to image %q", names, image.ID) - } - return nil -} - -func expandedTags(tags []string) ([]string, error) { - expandedNames := []string{} - for _, tag := range tags { - name, err := reference.ParseNormalizedNamed(tag) - if err != nil { - return nil, errors.Wrapf(err, "error parsing tag %q", name) - } - - name = reference.TagNameOnly(name) - newTag := "" - if tagged, ok := name.(reference.NamedTagged); ok { - newTag = tagged.Tag() - } - expandedNames = append(expandedNames, name.Name()+":"+newTag) - } - return expandedNames, nil -} diff --git a/cmd/kpod/umount.go b/cmd/kpod/umount.go deleted file mode 100644 index bad6752a..00000000 --- a/cmd/kpod/umount.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - umountCommand = cli.Command{ - Name: "umount", - Aliases: []string{"unmount"}, - Usage: "Unmount a working container's root filesystem", - Description: "Unmounts a working container's root filesystem", - Action: umountCmd, - ArgsUsage: "CONTAINER-NAME-OR-ID", - } -) - -func umountCmd(c *cli.Context) error { - args := c.Args() - if len(args) == 0 { - return errors.Errorf("container ID must be specified") - } - if len(args) > 1 { - return errors.Errorf("too many arguments specified") - } - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - store, err := getStore(config) - if err != nil { - return err - } - - err = store.Unmount(args[0]) - if err != nil { - return errors.Wrapf(err, "error unmounting container %q", args[0]) - } - return nil -} diff --git a/cmd/kpod/version.go b/cmd/kpod/version.go deleted file mode 100644 index 9680c900..00000000 --- a/cmd/kpod/version.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "fmt" - "runtime" - "strconv" - "time" - - "github.com/urfave/cli" -) - -// Overwritten at build time -var ( - gitCommit string - buildInfo string -) - -// versionCmd gets and prints version info for version command -func versionCmd(c *cli.Context) error { - fmt.Println("Version: ", c.App.Version) - fmt.Println("Go Version: ", runtime.Version()) - if gitCommit != "" { - fmt.Println("Git Commit: ", gitCommit) - } - if buildInfo != "" { - // Converts unix time from string to int64 - buildTime, err := strconv.ParseInt(buildInfo, 10, 64) - if err != nil { - return err - } - // Prints out the build time in readable format - fmt.Println("Built: ", time.Unix(buildTime, 0).Format(time.ANSIC)) - } - fmt.Println("OS/Arch: ", runtime.GOOS+"/"+runtime.GOARCH) - - return nil -} - -// Cli command to print out the full version of kpod -var versionCommand = cli.Command{ - Name: "version", - Usage: "Display the KPOD Version Information", - Action: versionCmd, -} diff --git a/code-of-conduct.md b/code-of-conduct.md index 215ce7ac..0d15c00c 100644 --- a/code-of-conduct.md +++ b/code-of-conduct.md @@ -1,55 +1,3 @@ -## Kubernetes Community Code of Conduct +# Kubernetes Community Code of Conduct -### Contributor Code of Conduct - -As contributors and maintainers of this project, and in the interest of fostering -an open and welcoming community, we pledge to respect all people who contribute -through reporting issues, posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. 
- -We are committed to making participation in this project a harassment-free experience for -everyone, regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, body size, race, ethnicity, age, -religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery. -* Personal attacks. -* Trolling or insulting/derogatory comments. -* Public or private harassment. -* Publishing other's private information, such as physical or electronic addresses, - without explicit permission. -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are not -aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers -commit themselves to fairly and consistently applying these principles to every aspect -of managing this project. Project maintainers who do not follow or enforce the Code of -Conduct may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Kubernetes maintainer, Sarah Novotny , and/or Dan Kohn . - -This Code of Conduct is adapted from the Contributor Covenant -(http://contributor-covenant.org), version 1.2.0, available at -http://contributor-covenant.org/version/1/2/0/ - -### Kubernetes Events Code of Conduct - -Kubernetes events are working conferences intended for professional networking and collaboration in the -Kubernetes community. Attendees are expected to behave according to professional standards and in accordance -with their employer's policies on appropriate workplace behavior. - -While at Kubernetes events or related social networking opportunities, attendees should not engage in -discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should -be especially aware of these concerns. - -The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes -team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to -be engaging in discriminatory or offensive speech or actions. - -Please bring any concerns to the immediate attention of the Kubernetes event staff. +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/completions/bash/kpod b/completions/bash/kpod deleted file mode 100644 index e68f9814..00000000 --- a/completions/bash/kpod +++ /dev/null @@ -1,479 +0,0 @@ -#! 
/bin/bash - -: ${PROG:=$(basename ${BASH_SOURCE})} - -__kpod_list_images() { - COMPREPLY=($(compgen -W "$(kpod images -q)" -- $cur)) -} - -__kpod_list_containers() { - COMPREPLY=($(compgen -W "$(kpod ps -aq)" -- $cur)) -} - -_kpod_diff() { - local options_with_args=" - --format - " - local boolean_options=" - " - _complete_ "$options_with_args" "$boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_images - ;; - esac -} - -_kpod_export() { - local options_with_args=" - --output - -o - " - local boolean_options=" - " - _complete_ "$options_with_args" "$boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_images - ;; - esac -} - -_kpod_history() { - local options_with_args=" - --format - " - local boolean_options=" - --human -H - --no-trunc - --quiet -q - " - _complete_ "$options_with_args" "$boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_images - ;; - esac -} - -_kpod_info() { - local boolean_options=" - --help - -h - --debug - " - local options_with_args=" - --format - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_images - ;; - esac -} - -_kpod_images() { - local boolean_options=" - --help - -h - --quiet - -q - --noheading - -n - --no-trunc - --digests - --filter - -f - " - local options_with_args=" - --format - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - esac -} - -_kpod_inspect() { - local boolean_options=" - --help - -h - " - local options_with_args=" - --format - -f - --type - -t - --size - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - esac -} - -_kpod_logs() { - local options_with_args=" - --since - --tail - " - local boolean_options=" - --follow - -f - " - _complete_ "$options_with_args" "$boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_containers - ;; - esac -} - -_kpod_pull() { - local options_with_args=" - " - local boolean_options=" - --all-tags -a - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_unmount() { - _kpod_umount $@ -} - -_kpod_umount() { - local boolean_options=" - --help - -h - " - local options_with_args=" - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - esac -} - -_kpod_mount() { - local boolean_options=" - --help - -h - --notruncate - " - - local options_with_args=" - --label - --format - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - esac -} - -_kpod_push() { - local boolean_options=" - --disable-compression - -D - --quiet - -q - --signature-policy - --certs - --tls-verify - --remove-signatures - --sign-by - " - - local options_with_args=" - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W 
"$boolean_options $options_with_args" -- "$cur")) - ;; - esac -} - -_kpod_rename() { - local boolean_options=" - --help - -h - " - local options_with_args=" - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_containers - ;; - esac -} - -_kpod_rm() { - local boolean_options=" - --force - -f - " - - local options_with_args=" - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_containers - ;; - esac -} - -_kpod_rmi() { - local boolean_options=" - --help - -h - --force - -f - " - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_images - ;; - esac -} - -_kpod_stats() { - local boolean_options=" - --help - --all - -a - --no-stream - --format - " - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_containers - ;; - esac -} - -kpod_tag() { - local options_with_args=" - " - local boolean_options=" - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_version() { - local options_with_args=" - " - local boolean_options=" - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_save() { - local options_with_args=" - --output -o - " - local boolean_options=" - --quiet -q - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_export() { - local options_with_args=" - --output -o - " - local boolean_options=" - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_ps() { - local options_with_args=" - --filter -f - --format - --last -n - " - local boolean_options=" - --all -a - --latest -l - --no-trunc - --quiet -q - --size -s - " - _complete_ "$options_with_args" "$boolean_options" -} - -_complete_() { - local options_with_args=$1 - local boolean_options="$2 -h --help" - - case "$prev" in - $options_with_args) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) - ;; - esac -} - -_kpod_load() { - local options_with_args=" - --input -i - " - local boolean_options=" - --quiet -q - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_kpod() { - local options_with_args=" - --config -c - --root - --runroot - --storage-driver - --storage-opt - " - local boolean_options=" - --debug - --help -h - --version -v - " - commands=" - diff - export - history - images - info - inspect - load - logs - mount - ps - pull - push - rename - rm - rmi - save - stats - tag - umount - unmount - version - " - - case "$prev" in - $main_options_with_args_glob ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) - ;; - esac -} - -_cli_bash_autocomplete() { - local cur opts base - - COMPREPLY=() - cur="${COMP_WORDS[COMP_CWORD]}" - COMPREPLY=() - local cur prev words cword - - _get_comp_words_by_ref -n : cur prev words cword - - local command=${PROG} cpos=0 - local counter=1 - counter=1 - while [ $counter -lt $cword ]; do - case "!${words[$counter]}" in - *) - command=$(echo "${words[$counter]}" | sed 's/-/_/g') - cpos=$counter - (( cpos++ )) - break - ;; - esac - (( counter++ )) - done - - local completions_func=_kpod_${command} - 
declare -F $completions_func >/dev/null && $completions_func - - eval "$previous_extglob_setting" - return 0 -} - -complete -F _cli_bash_autocomplete $PROG diff --git a/conmon/Makefile b/conmon/Makefile index 460c1faa..b75605d9 100644 --- a/conmon/Makefile +++ b/conmon/Makefile @@ -5,8 +5,8 @@ override LIBS += $(shell pkg-config --libs glib-2.0) override CFLAGS += -std=c99 -Os -Wall -Wextra $(shell pkg-config --cflags glib-2.0) conmon: $(obj) - $(CC) -o $@ $^ $(CFLAGS) $(LIBS) + $(CC) -o ../bin/$@ $^ $(CFLAGS) $(LIBS) .PHONY: clean clean: - rm -f $(obj) conmon + rm -f $(obj) ../bin/conmon diff --git a/conmon/conmon.c b/conmon/conmon.c index 006ba141..477b98bf 100644 --- a/conmon/conmon.c +++ b/conmon/conmon.c @@ -7,11 +7,11 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -94,6 +95,8 @@ static inline void strv_cleanup(char ***strv) #define CMD_SIZE 1024 #define MAX_EVENTS 10 +#define DEFAULT_SOCKET_PATH "/var/lib/crio" + static bool opt_terminal = false; static bool opt_stdin = false; static char *opt_cid = NULL; @@ -102,11 +105,14 @@ static char *opt_runtime_path = NULL; static char *opt_bundle_path = NULL; static char *opt_pid_file = NULL; static bool opt_systemd_cgroup = false; +static bool opt_no_pivot = false; static char *opt_exec_process_spec = NULL; static bool opt_exec = false; static char *opt_log_path = NULL; static char *opt_exit_dir = NULL; static int opt_timeout = 0; +static int64_t opt_log_size_max = -1; +static char *opt_socket_path = DEFAULT_SOCKET_PATH; static GOptionEntry opt_entries[] = { { "terminal", 't', 0, G_OPTION_ARG_NONE, &opt_terminal, "Terminal", NULL }, @@ -114,6 +120,7 @@ static GOptionEntry opt_entries[] = { "cid", 'c', 0, G_OPTION_ARG_STRING, &opt_cid, "Container ID", NULL }, { "cuuid", 'u', 0, G_OPTION_ARG_STRING, &opt_cuuid, "Container UUID", NULL }, { "runtime", 'r', 0, G_OPTION_ARG_STRING, &opt_runtime_path, "Runtime path", NULL }, + { "no-pivot", 0, 0, G_OPTION_ARG_NONE, &opt_no_pivot, "do not use pivot_root", NULL }, { "bundle", 'b', 0, G_OPTION_ARG_STRING, &opt_bundle_path, "Bundle path", NULL }, { "pidfile", 'p', 0, G_OPTION_ARG_STRING, &opt_pid_file, "PID file", NULL }, { "systemd-cgroup", 's', 0, G_OPTION_ARG_NONE, &opt_systemd_cgroup, "Enable systemd cgroup manager", NULL }, @@ -122,6 +129,8 @@ static GOptionEntry opt_entries[] = { "exit-dir", 0, 0, G_OPTION_ARG_STRING, &opt_exit_dir, "Path to the directory where exit files are written", NULL }, { "log-path", 'l', 0, G_OPTION_ARG_STRING, &opt_log_path, "Log file path", NULL }, { "timeout", 'T', 0, G_OPTION_ARG_INT, &opt_timeout, "Timeout in seconds", NULL }, + { "log-size-max", 0, 0, G_OPTION_ARG_INT64, &opt_log_size_max, "Maximum size of log file", NULL }, + { "socket-dir-path", 0, 0, G_OPTION_ARG_STRING, &opt_socket_path, "Location of container attach sockets", NULL }, { NULL } }; @@ -130,6 +139,8 @@ static GOptionEntry opt_entries[] = #define CGROUP_ROOT "/sys/fs/cgroup" +static int log_fd = -1; + static ssize_t write_all(int fd, const void *buf, size_t count) { size_t remaining = count; @@ -281,11 +292,12 @@ const char *stdpipe_name(stdpipe_t pipe) * line in buf, and will partially write the final line of the log if buf is * not terminated by a newline. 
*/ -int write_k8s_log(int fd, stdpipe_t pipe, const char *buf, ssize_t buflen) +static int write_k8s_log(int fd, stdpipe_t pipe, const char *buf, ssize_t buflen) { char tsbuf[TSBUFLEN]; - static stdpipe_t trailing_line = NO_PIPE; writev_buffer_t bufv = {0}; + static int64_t bytes_written = 0; + int64_t bytes_to_be_written = 0; /* * Use the same timestamp for every line of the log in this buffer. @@ -299,30 +311,63 @@ int write_k8s_log(int fd, stdpipe_t pipe, const char *buf, ssize_t buflen) while (buflen > 0) { const char *line_end = NULL; ptrdiff_t line_len = 0; + bool partial = FALSE; /* Find the end of the line, or alternatively the end of the buffer. */ line_end = memchr(buf, '\n', buflen); - if (line_end == NULL) + if (line_end == NULL) { line_end = &buf[buflen-1]; + partial = TRUE; + } line_len = line_end - buf + 1; + /* This is line_len bytes + TSBUFLEN - 1 + 2 (- 1 is for ignoring \0). */ + bytes_to_be_written = line_len + TSBUFLEN + 1; + + /* If partial, then we add a \n */ + if (partial) { + bytes_to_be_written += 1; + } + /* - * Write the (timestamp, stream) tuple if there isn't any trailing - * output from the previous line (or if there is trailing output but - * the current buffer being printed is from a different pipe). + * We re-open the log file if writing out the bytes will exceed the max + * log size. We also reset the state so that the new file is started with + * a timestamp. */ - if (trailing_line != pipe) { - /* - * If there was a trailing line from a different pipe, prepend a - * newline to split it properly. This technically breaks the flow - * of the previous line (adding a newline in the log where there - * wasn't one output) but without modifying the file in a - * non-append-only way there's not much we can do. - */ - if ((trailing_line != NO_PIPE && - writev_buffer_append_segment(fd, &bufv, "\n", -1) < 0) || - writev_buffer_append_segment(fd, &bufv, tsbuf, -1) < 0) { - nwarn("failed to write (timestamp, stream) to log"); + if ((opt_log_size_max > 0) && (bytes_written + bytes_to_be_written) > opt_log_size_max) { + ninfo("Creating new log file"); + bytes_written = 0; + + /* Close the existing fd */ + close(fd); + + /* Unlink the file */ + if (unlink(opt_log_path) < 0) { + pexit("Failed to unlink log file"); + } + + /* Open the log path file again */ + log_fd = open(opt_log_path, O_WRONLY | O_APPEND | O_CREAT | O_CLOEXEC, 0600); + if (log_fd < 0) + pexit("Failed to open log file %s: %s", opt_log_path, strerror(errno)); + fd = log_fd; + } + + /* Output the timestamp */ + if (writev_buffer_append_segment(fd, &bufv, tsbuf, -1) < 0) { + nwarn("failed to write (timestamp, stream) to log"); + goto next; + } + + /* Output log tag for partial or newline */ + if (partial) { + if (writev_buffer_append_segment(fd, &bufv, "P ", -1) < 0) { + nwarn("failed to write partial log tag"); + goto next; + } + } else { + if (writev_buffer_append_segment(fd, &bufv, "F ", -1) < 0) { + nwarn("failed to write end log tag"); goto next; } } @@ -333,9 +378,15 @@ int write_k8s_log(int fd, stdpipe_t pipe, const char *buf, ssize_t buflen) goto next; } - /* If we did not output a full line, then we are a trailing_line. */ - trailing_line = (*line_end == '\n') ? NO_PIPE : pipe; + /* Output a newline for partial */ + if (partial) { + if (writev_buffer_append_segment(fd, &bufv, "\n", -1) < 0) { + nwarn("failed to write newline to log"); + goto next; + } + } + bytes_written += bytes_to_be_written; next: /* Update the head of the buffer remaining to output. 
*/ buf += line_len; @@ -346,6 +397,8 @@ next: nwarn("failed to flush buffer to log"); } + ninfo("Total bytes written: %"PRId64"", bytes_written); + return 0; } @@ -481,7 +534,6 @@ static int conn_sock = -1; static int conn_sock_readable; static int conn_sock_writable; -static int log_fd = -1; static int oom_event_fd = -1; static int attach_socket_fd = -1; static int console_socket_fd = -1; @@ -931,14 +983,14 @@ static char *setup_attach_socket(void) * Create a symlink so we don't exceed unix domain socket * path length limit. */ - attach_symlink_dir_path = g_build_filename("/var/run/crio", opt_cuuid, NULL); + attach_symlink_dir_path = g_build_filename(opt_socket_path, opt_cuuid, NULL); if (unlink(attach_symlink_dir_path) == -1 && errno != ENOENT) pexit("Failed to remove existing symlink for attach socket directory"); if (symlink(opt_bundle_path, attach_symlink_dir_path) == -1) pexit("Failed to create symlink for attach socket"); - attach_sock_path = g_build_filename("/var/run/crio", opt_cuuid, "attach", NULL); + attach_sock_path = g_build_filename(opt_socket_path, opt_cuuid, "attach", NULL); ninfo("attach sock path: %s", attach_sock_path); strncpy(attach_addr.sun_path, attach_sock_path, sizeof(attach_addr.sun_path) - 1); @@ -1068,6 +1120,8 @@ int main(int argc, char *argv[]) if (opt_runtime_path == NULL) nexit("Runtime path not provided. Use --runtime"); + if (access(opt_runtime_path, X_OK) < 0) + pexit("Runtime path %s is not valid: %s", opt_runtime_path, strerror(errno)); if (!opt_exec && opt_exit_dir == NULL) nexit("Container exit directory not provided. Use --exit-dir"); @@ -1209,6 +1263,12 @@ int main(int argc, char *argv[]) NULL); } + if (!opt_exec && opt_no_pivot) { + add_argv(runtime_argv, + "--no-pivot", + NULL); + } + if (csname != NULL) { add_argv(runtime_argv, "--console-socket", csname, diff --git a/contrib/rpm/Makefile b/contrib/rpm/Makefile deleted file mode 100644 index 24bbca28..00000000 --- a/contrib/rpm/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -.PHONY: dist -dist: crio.spec - spectool -g crio.spec - -.PHONY: rpm -rpm: dist - rpmbuild --define "_sourcedir `pwd`" --define "_specdir `pwd`" \ - --define "_rpmdir `pwd`" --define "_srcrpmdir `pwd`" -ba crio.spec - -all: rpm - -clean: - rm -f *rpm *gz - rm -rf x86_64 diff --git a/contrib/rpm/crio.spec b/contrib/rpm/crio.spec deleted file mode 100644 index bd7dd3f7..00000000 --- a/contrib/rpm/crio.spec +++ /dev/null @@ -1,72 +0,0 @@ -%define debug_package %{nil} -%global provider github -%global provider_tld com -%global project kubernetes-incubator -%global repo cri-o -%global Name crio -# https://github.com/kubernetes-incubator/cri-o -%global provider_prefix %{provider}.%{provider_tld}/%{project}/%{repo} -%global import_path %{provider_prefix} -%global commit 8ba639952a95f2e24cc98987689138b67545576c -%global shortcommit %(c=%{commit}; echo ${c:0:7}) - -Name: %{Name} -Version: 0.0.1 -Release: 1.git%{shortcommit}%{?dist} -Summary: Kubelet Container Runtime Interface (CRI) for OCI runtimes. -Group: Applications/Text -License: Apache 2.0 -URL: https://%{provider_prefix} -Source0: https://%{provider_prefix}/archive/%{commit}/%{repo}-%{shortcommit}.tar.gz -Provides: %{repo} - -BuildRequires: golang-github-cpuguy83-go-md2man - -%description -The crio package provides an implementation of the -Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes. 
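Stepping back to the conmon changes above, here is a sketch of the new surface (`--log-size-max`, `--socket-dir-path`, `--no-pivot`) as a hypothetical direct invocation; in practice crio assembles this command line, and the container ID and paths below are invented:

```
# Hypothetical conmon invocation exercising the flags added in this patch:
#   --log-size-max     unlink and recreate the log file once a write would push it past this size
#   --socket-dir-path  where attach-socket symlinks are created (default /var/lib/crio)
#   --no-pivot         forwarded to the runtime as --no-pivot (container create only, not exec)
conmon -c "$CID" -u "$CUUID" \
       -r /usr/bin/runc \
       -b "/run/containers/$CID" \
       -p "/run/containers/$CID/pidfile" \
       -l "/var/log/pods/$CID.log" \
       --exit-dir /var/run/crio/exits \
       --log-size-max $((10 * 1024 * 1024)) \
       --socket-dir-path /var/lib/crio \
       --no-pivot
```

With the `P `/`F ` tags introduced in `write_k8s_log`, every line in the log file now carries a (timestamp, stream, partial/full tag, payload) tuple, e.g. `2017-11-02T11:22:33.000000001Z stdout P some partial line`.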
- -crio provides following functionalities: - - Support multiple image formats including the existing Docker image format - Support for multiple means to download images including trust & image verification - Container image management (managing image layers, overlay filesystems, etc) - Container process lifecycle management - Monitoring and logging required to satisfy the CRI - Resource isolation as required by the CRI - -%prep -%setup -q -n %{repo}-%{commit} - -%build -make all - -%install -%make_install -%make_install install.systemd - -#define license tag if not already defined -%{!?_licensedir:%global license %doc} -%files -%{_bindir}/crio -%{_bindir}/crioctl -%{_mandir}/man5/crio.conf.5* -%{_mandir}/man8/crio.8* -%{_sysconfdir}/crio.conf -%dir /%{_libexecdir}/crio -/%{_libexecdir}/crio/conmon -/%{_libexecdir}/crio/pause -%{_unitdir}/crio.service -%doc README.md -%license LICENSE - -%preun -%systemd_preun %{Name} - -%postun -%systemd_postun_with_restart %{Name} - -%changelog -* Mon Oct 31 2016 Dan Walsh - 0.0.1 -- Initial RPM release - diff --git a/contrib/system_containers/centos/Dockerfile b/contrib/system_containers/centos/Dockerfile new file mode 100644 index 00000000..0797fb14 --- /dev/null +++ b/contrib/system_containers/centos/Dockerfile @@ -0,0 +1,29 @@ +FROM centos + +ENV VERSION=0 RELEASE=1 ARCH=x86_64 +LABEL com.redhat.component="cri-o" \ + name="$FGC/cri-o" \ + version="$VERSION" \ + release="$RELEASE.$DISTTAG" \ + architecture="$ARCH" \ + usage="atomic install --system --system-package=no crio && systemctl start crio" \ + summary="The cri-o daemon as a system container." \ + maintainer="Yu Qi Zhang " \ + atomic.type="system" + +RUN yum-config-manager --nogpgcheck --add-repo https://cbs.centos.org/repos/virt7-container-common-candidate/x86_64/os/ && \ + yum install --disablerepo=extras --nogpgcheck --setopt=tsflags=nodocs -y iptables cri-o socat iproute runc && \ + rpm -V iptables cri-o iproute runc && \ + yum clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +RUN sed -i '/storage_option =/s/.*/&\n"overlay.override_kernel_check=1",/' /exports/hostfs/etc/crio/crio.conf + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] diff --git a/contrib/system_containers/centos/README.md b/contrib/system_containers/centos/README.md new file mode 100644 index 00000000..428bc6ff --- /dev/null +++ b/contrib/system_containers/centos/README.md @@ -0,0 +1,57 @@ +# cri-o + +This is the cri-o daemon as a system container. + +## Building the image from source: + +``` +# git clone https://github.com/projectatomic/atomic-system-containers +# cd atomic-system-containers/cri-o +# docker build -t crio . +``` + +## Running the system container, with the atomic CLI: + +Pull from registry into ostree: + +``` +# atomic pull --storage ostree $REGISTRY/crio +``` + +Or alternatively, pull from local docker: + +``` +# atomic pull --storage ostree docker:crio:latest +``` + +Install the container: + +Currently we recommend using --system-package=no to avoid having rpmbuild create an rpm file +during installation. This flag will tell the atomic CLI to fall back to copying files to the +host instead. 
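One thing the README leaves implicit: the `defaultValues` in this image's `manifest.json` (e.g. `LOG_LEVEL`, `VAR_LIB_CONTAINERS_STORAGE`) are template variables, and, assuming the atomic CLI's `--set` option for system containers, they can be overridden at install time instead of editing the templates:

```
# Assumes atomic's --set flag for system containers; the variable names are the
# defaultValues from this image's manifest.json.
atomic install --system --system-package=no --name=crio \
       --set LOG_LEVEL=debug \
       --set VAR_LIB_CONTAINERS_STORAGE=/srv/containers/storage \
       $REGISTRY/crio
```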
+ +``` +# atomic install --system --system-package=no --name=crio ($REGISTRY)/crio +``` + +Start as a systemd service: + +``` +# systemctl start crio +``` + +Stopping the service: + +``` +# systemctl stop crio +``` + +Removing the container: + +``` +# atomic uninstall crio +``` + +## Binary version + +You can find the image automatically built as: registry.centos.org/projectatomic/cri-o:latest diff --git a/contrib/system_containers/centos/cccp.yml b/contrib/system_containers/centos/cccp.yml new file mode 100644 index 00000000..ec4dab74 --- /dev/null +++ b/contrib/system_containers/centos/cccp.yml @@ -0,0 +1,41 @@ +# This is for the purpose of building containers on the CentOS Community Container +# Pipeline. The containers are built, tested and delivered to registry.centos.org and +# lifecycled as well. A corresponding entry must exist in the container index itself, +# located at https://github.com/CentOS/container-index/tree/master/index.d +# You can learn more at the following links: +# * https://github.com/CentOS/container-pipeline-service/blob/master/README.md +# * https://github.com/CentOS/container-index/blob/master/README.rst +# * https://wiki.centos.org/ContainerPipeline + +# This will be part of the name of the container. It should match the job-id in the index entry +job-id: cri-o + +# The following are optional and can be left blank +# Defaults, where applicable, are filled in +#nulecule-file : nulecule + +# This flag tells the container pipeline to skip user-defined tests on their container +test-skip : True + +# This is the path of the script that initiates the user-defined tests. It must be able to +# return an exit code. +test-script : null + +# This is the path of the custom build script. +build-script : null + +# This is the path of the custom delivery script. +delivery-script : null + +# This flag tells the pipeline to deliver this container to Docker Hub.
+docker-index : True + +# This flag can be used to enable or disable the custom delivery +custom-delivery : False + +# This flag can be used to enable or disable delivery of container to local registry +local-delivery : True + +Upstreams : + - ref : + url : diff --git a/contrib/system_containers/centos/config.json.template b/contrib/system_containers/centos/config.json.template new file mode 100644 index 00000000..785383d4 --- /dev/null +++ b/contrib/system_containers/centos/config.json.template @@ -0,0 +1,427 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + 
"CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ] + }, + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [ + { + "type": "mount" + } + ], + "resources": { + "devices": [ + { + "access": "rwm", + "allow": true + } + ] + }, + "rootfsPropagation": "private" + }, + "mounts": [ + { + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + "destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": "/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": "${OPT_CNI}", + "type": "bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + 
"type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/centos/manifest.json b/contrib/system_containers/centos/manifest.json new file mode 100644 index 00000000..38f4dc87 --- /dev/null +++ b/contrib/system_containers/centos/manifest.json @@ -0,0 +1,10 @@ +{ + "version": "1.0", + "defaultValues": { + "LOG_LEVEL" : "info", + "OPT_CNI" : "/opt/cni", + "VAR_LIB_CONTAINERS_STORAGE" : "/var/lib/containers/storage", + "VAR_LIB_ORIGIN" : "/var/lib/origin", + "VAR_LIB_KUBE" : "/var/lib/kubelet" + } +} diff --git a/contrib/system_containers/centos/run.sh b/contrib/system_containers/centos/run.sh new file mode 100755 index 00000000..7f34fd42 --- /dev/null +++ b/contrib/system_containers/centos/run.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +# Ensure that new process maintain this SELinux label +PID=$$ +LABEL=`tr -d '\000' < /proc/$PID/attr/current` +printf %s $LABEL > /proc/self/attr/exec + +test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage +test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network + +exec /usr/bin/crio --log-level=$LOG_LEVEL diff --git a/contrib/system_containers/centos/service.template b/contrib/system_containers/centos/service.template new file mode 100644 index 00000000..4c08b39d --- /dev/null +++ b/contrib/system_containers/centos/service.template @@ -0,0 +1,20 @@ +[Unit] +Description=crio daemon +After=network.target + +[Service] +Type=notify +ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh +ExecStart=$EXEC_START +ExecStop=$EXEC_STOP +Restart=on-failure +WorkingDirectory=$DESTDIR +RuntimeDirectory=${NAME} +TasksMax=infinity +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/system_containers/centos/set_mounts.sh b/contrib/system_containers/centos/set_mounts.sh new file mode 100755 index 00000000..c1f0c050 --- /dev/null +++ b/contrib/system_containers/centos/set_mounts.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage +findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin +findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet +mount --make-shared /run +findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd diff --git a/contrib/system_containers/centos/tmpfiles.template b/contrib/system_containers/centos/tmpfiles.template new file mode 100644 index 00000000..94472677 --- /dev/null +++ b/contrib/system_containers/centos/tmpfiles.template @@ -0,0 +1,5 @@ +d ${RUN_DIRECTORY}/crio - - - - - +d /etc/crio - - - - - +Z /etc/crio - - - - - +d ${STATE_DIRECTORY}/origin - - - - - +d ${STATE_DIRECTORY}/kubelet - - - - - diff --git a/contrib/system_containers/fedora/Dockerfile b/contrib/system_containers/fedora/Dockerfile new file mode 100644 index 00000000..da12c6f0 --- /dev/null +++ b/contrib/system_containers/fedora/Dockerfile @@ -0,0 +1,30 @@ +FROM registry.fedoraproject.org/fedora:27 + +ENV VERSION=0 RELEASE=1 ARCH=x86_64 +LABEL com.redhat.component="cri-o" \ + name="$FGC/cri-o" \ + version="$VERSION" \ + release="$RELEASE.$DISTTAG" \ + architecture="$ARCH" \ 
+ usage="atomic install --system --system-package=no crio && systemctl start crio" \ + summary="The cri-o daemon as a system container." \ + maintainer="Yu Qi Zhang " \ + atomic.type="system" + +COPY README.md / + +RUN dnf install --enablerepo=updates-testing --setopt=tsflags=nodocs -y iptables cri-o socat iproute runc && \ + rpm -V iptables cri-o iproute runc && \ + dnf clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +RUN sed -i '/storage_option =/s/.*/&\n"overlay.override_kernel_check=1",/' /exports/hostfs/etc/crio/crio.conf + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] diff --git a/contrib/system_containers/fedora/README.md b/contrib/system_containers/fedora/README.md new file mode 100644 index 00000000..6de39964 --- /dev/null +++ b/contrib/system_containers/fedora/README.md @@ -0,0 +1,53 @@ +# cri-o + +This is the cri-o daemon as a system container. + +## Building the image from source: + +``` +# git clone https://github.com/projectatomic/atomic-system-containers +# cd atomic-system-containers/cri-o +# docker build -t crio . +``` + +## Running the system container, with the atomic CLI: + +Pull from registry into ostree: + +``` +# atomic pull --storage ostree $REGISTRY/crio +``` + +Or alternatively, pull from local docker: + +``` +# atomic pull --storage ostree docker:crio:latest +``` + +Install the container: + +Currently we recommend using --system-package=no to avoid having rpmbuild create an rpm file +during installation. This flag will tell the atomic CLI to fall back to copying files to the +host instead. 
+ +``` +# atomic install --system --system-package=no --name=crio ($REGISTRY)/crio +``` + +Start as a systemd service: + +``` +# systemctl start crio +``` + +Stopping the service + +``` +# systemctl stop crio +``` + +Removing the container + +``` +# atomic uninstall crio +``` diff --git a/contrib/system_containers/fedora/config.json.template b/contrib/system_containers/fedora/config.json.template new file mode 100644 index 00000000..0642fbc1 --- /dev/null +++ b/contrib/system_containers/fedora/config.json.template @@ -0,0 +1,432 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + 
"CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ] + }, + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [ + { + "type": "mount" + } + ], + "resources": { + "devices": [ + { + "access": "rwm", + "allow": true + } + ] + }, + "rootfsPropagation": "private" + }, + "mounts": [ + { + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + "destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": "/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": "${OPT_CNI}", + "type": 
"bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + "type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/fedora/manifest.json b/contrib/system_containers/fedora/manifest.json new file mode 100644 index 00000000..38f4dc87 --- /dev/null +++ b/contrib/system_containers/fedora/manifest.json @@ -0,0 +1,10 @@ +{ + "version": "1.0", + "defaultValues": { + "LOG_LEVEL" : "info", + "OPT_CNI" : "/opt/cni", + "VAR_LIB_CONTAINERS_STORAGE" : "/var/lib/containers/storage", + "VAR_LIB_ORIGIN" : "/var/lib/origin", + "VAR_LIB_KUBE" : "/var/lib/kubelet" + } +} diff --git a/contrib/system_containers/fedora/run.sh b/contrib/system_containers/fedora/run.sh new file mode 100755 index 00000000..7f34fd42 --- /dev/null +++ b/contrib/system_containers/fedora/run.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +# Ensure that new process maintain this SELinux label +PID=$$ +LABEL=`tr -d '\000' < /proc/$PID/attr/current` +printf %s $LABEL > /proc/self/attr/exec + +test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage +test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network + +exec /usr/bin/crio --log-level=$LOG_LEVEL diff --git a/contrib/system_containers/fedora/service.template b/contrib/system_containers/fedora/service.template new file mode 100644 index 00000000..4c08b39d --- /dev/null +++ b/contrib/system_containers/fedora/service.template @@ -0,0 +1,20 @@ +[Unit] +Description=crio daemon +After=network.target + +[Service] +Type=notify +ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh +ExecStart=$EXEC_START +ExecStop=$EXEC_STOP +Restart=on-failure +WorkingDirectory=$DESTDIR +RuntimeDirectory=${NAME} +TasksMax=infinity +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/system_containers/fedora/set_mounts.sh b/contrib/system_containers/fedora/set_mounts.sh new file mode 100755 index 00000000..c1f0c050 --- /dev/null +++ b/contrib/system_containers/fedora/set_mounts.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage +findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin +findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet +mount --make-shared /run +findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd diff --git a/contrib/system_containers/fedora/tmpfiles.template b/contrib/system_containers/fedora/tmpfiles.template new file mode 100644 index 00000000..94472677 --- /dev/null +++ b/contrib/system_containers/fedora/tmpfiles.template @@ -0,0 +1,5 @@ +d ${RUN_DIRECTORY}/crio - - - - - +d /etc/crio - - - - - +Z /etc/crio - - - - - +d ${STATE_DIRECTORY}/origin - - - - - +d ${STATE_DIRECTORY}/kubelet - - - - - diff --git a/contrib/system_containers/rhel/Dockerfile b/contrib/system_containers/rhel/Dockerfile new file mode 100644 index 00000000..3c113fda --- /dev/null +++ b/contrib/system_containers/rhel/Dockerfile @@ -0,0 +1,41 @@ +#oit## This file is managed by the OpenShift Image Tool +#oit## by the OpenShift Continuous 
Delivery team. +#oit## +#oit## Any yum repos listed in this file will effectively be ignored during CD builds. +#oit## Yum repos must be enabled in the oit configuration files. +#oit## Some aspects of this file may be managed programmatically. For example, the image name, labels (version, +#oit## release, and other), and the base FROM. Changes made directly in distgit may be lost during the next +#oit## reconciliation. +#oit## +FROM rhel7:7-released + +RUN \ + yum install --setopt=tsflags=nodocs -y socat iptables cri-o iproute runc skopeo-containers container-selinux && \ + rpm -V socat iptables cri-o iproute runc skopeo-containers container-selinux && \ + yum clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] + +LABEL \ + com.redhat.component="cri-o-docker" \ + io.k8s.description="CRI-O is an implementation of the Kubernetes CRI. It is a lightweight, OCI-compliant runtime that is native to kubernetes. CRI-O supports OCI container images and can pull from any container registry." \ + maintainer="Jhon Honce " \ + name="openshift3/cri-o" \ + License="GPLv2+" \ + io.k8s.display-name="CRI-O" \ + summary="OCI-based implementation of Kubernetes Container Runtime Interface" \ + release="0.13.0.0" \ + version="v3.8.0" \ + architecture="x86_64" \ + usage="atomic install --system --system-package=no crio && systemctl start crio" \ + vendor="Red Hat" \ + io.openshift.tags="cri-o system rhel7" \ + atomic.type="system" diff --git a/contrib/system_containers/rhel/config.json.template b/contrib/system_containers/rhel/config.json.template new file mode 100644 index 00000000..a5eb001e --- /dev/null +++ b/contrib/system_containers/rhel/config.json.template @@ -0,0 +1,422 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + 
"CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ] + }, + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [{ + "type": "mount" + }], + "resources": { + "devices": [{ + "access": "rwm", + "allow": true + }] + }, + "rootfsPropagation": "private" + }, + "mounts": [{ + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + "destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + 
"options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": "/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": "${OPT_CNI}", + "type": "bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + "type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/rhel/help.md b/contrib/system_containers/rhel/help.md new file mode 100644 index 00000000..e46702e7 --- /dev/null +++ b/contrib/system_containers/rhel/help.md @@ -0,0 +1,37 @@ +% CRI-O (1) Container Image Pages +% Jhon Honce +% September 7, 2017 + +# NAME +cri-o - OCI-based implementation of Kubernetes Container Runtime Interface + +# DESCRIPTION +CRI-O is an implementation of the Kubernetes CRI. It is a lightweight, OCI-compliant runtime that is native to kubernetes. CRI-O supports OCI container images and can pull from any container registry. 
+ +You can find more information on the CRI-O project at + +# USAGE +Pull from local docker and install system container: + +``` +# atomic pull --storage ostree docker:openshift3/cri-o:latest +# atomic install --system --system-package=no --name cri-o openshift3/cri-o +``` + +Start and enable as a systemd service: +``` +# systemctl enable --now cri-o +``` + +Stopping the service +``` +# systemctl stop cri-o +``` + +Removing the container +``` +# atomic uninstall cri-o +``` + +# SEE ALSO +man systemd(1) diff --git a/contrib/system_containers/rhel/manifest.json b/contrib/system_containers/rhel/manifest.json new file mode 100644 index 00000000..727abf9e --- /dev/null +++ b/contrib/system_containers/rhel/manifest.json @@ -0,0 +1,10 @@ +{ + "version": "1.0", + "defaultValues": { + "LOG_LEVEL": "info", + "OPT_CNI": "/opt/cni", + "VAR_LIB_CONTAINERS_STORAGE": "/var/lib/containers/storage", + "VAR_LIB_ORIGIN": "/var/lib/origin", + "VAR_LIB_KUBE": "/var/lib/kubelet" + } +} diff --git a/contrib/system_containers/rhel/run.sh b/contrib/system_containers/rhel/run.sh new file mode 100755 index 00000000..7f34fd42 --- /dev/null +++ b/contrib/system_containers/rhel/run.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +# Ensure that new process maintain this SELinux label +PID=$$ +LABEL=`tr -d '\000' < /proc/$PID/attr/current` +printf %s $LABEL > /proc/self/attr/exec + +test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage +test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network + +exec /usr/bin/crio --log-level=$LOG_LEVEL diff --git a/contrib/system_containers/rhel/service.template b/contrib/system_containers/rhel/service.template new file mode 100644 index 00000000..4c08b39d --- /dev/null +++ b/contrib/system_containers/rhel/service.template @@ -0,0 +1,20 @@ +[Unit] +Description=crio daemon +After=network.target + +[Service] +Type=notify +ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh +ExecStart=$EXEC_START +ExecStop=$EXEC_STOP +Restart=on-failure +WorkingDirectory=$DESTDIR +RuntimeDirectory=${NAME} +TasksMax=infinity +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/system_containers/rhel/set_mounts.sh b/contrib/system_containers/rhel/set_mounts.sh new file mode 100755 index 00000000..c1f0c050 --- /dev/null +++ b/contrib/system_containers/rhel/set_mounts.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage +findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin +findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet +mount --make-shared /run +findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd diff --git a/contrib/system_containers/rhel/tmpfiles.template b/contrib/system_containers/rhel/tmpfiles.template new file mode 100644 index 00000000..94472677 --- /dev/null +++ b/contrib/system_containers/rhel/tmpfiles.template @@ -0,0 +1,5 @@ +d ${RUN_DIRECTORY}/crio - - - - - +d /etc/crio - - - - - +Z /etc/crio - - - - - +d ${STATE_DIRECTORY}/origin - - - - - +d ${STATE_DIRECTORY}/kubelet - - - - - diff --git a/contrib/systemd/crio.service b/contrib/systemd/crio.service index 70a3d26b..35d6d427 100644 --- a/contrib/systemd/crio.service +++ b/contrib/systemd/crio.service @@ -12,7 +12,7 @@ ExecStart=/usr/local/bin/crio \ $CRIO_STORAGE_OPTIONS \ 
$CRIO_NETWORK_OPTIONS ExecReload=/bin/kill -s HUP $MAINPID -TasksMax=8192 +TasksMax=infinity LimitNOFILE=1048576 LimitNPROC=1048576 LimitCORE=infinity diff --git a/contrib/test/crio-integration-playbook.yaml b/contrib/test/crio-integration-playbook.yaml deleted file mode 100644 index af65ac20..00000000 --- a/contrib/test/crio-integration-playbook.yaml +++ /dev/null @@ -1,419 +0,0 @@ -## This playbook expects --extra-vars "commit=" -## and either --extra-vars "pullrequest=" or -## --skip-tags pr -- hosts: all - remote_user: root - vars: - xunit: false - cni_commit: dcf7368eeab15e2affc6256f0bb1e84dd46a34de - tasks: - - name: Update all packages - yum: - name: '*' - state: latest - async: 600 - poll: 10 - when: (ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS') - ignore_errors: true - - name: Update all packages on Fedora - dnf: - name: '*' - state: latest - async: 600 - poll: 10 - when: ansible_distribution == 'Fedora' - - name: Make sure we have all required packages - yum: - name: "{{ item }}" - state: latest - with_items: - - wget - - git - - make - - gcc - - tar - - libseccomp-devel - - golang - - glib2-devel - - glibc-static - - container-selinux - - btrfs-progs-devel - - device-mapper-devel - - ostree-devel - - glibc-devel - - gpgme-devel - - libassuan-devel - - libgpg-error-devel - - pkgconfig - - skopeo-containers - - oci-systemd-hook - - oci-register-machine - - oci-umount - async: 600 - poll: 10 - when: (ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS') - - name: Make sure we have all required packages on Fedora - dnf: - name: "{{ item }}" - state: latest - with_items: - - wget - - git - - make - - gcc - - tar - - libseccomp-devel - - golang - - glib2-devel - - glibc-static - - container-selinux - - btrfs-progs-devel - - device-mapper-devel - - ostree-devel - - glibc-devel - - gpgme-devel - - libassuan-devel - - libgpg-error-devel - - pkgconfig - - skopeo-containers - - oci-systemd-hook - - oci-register-machine - - oci-umount - async: 600 - poll: 10 - when: ansible_distribution == 'Fedora' - - name: Setup swap to prevent kernel firing off the OOM killer - shell: | - truncate -s 8G /root/swap && \ - export SWAPDEV=$(losetup --show -f /root/swap | head -1) && \ - mkswap $SWAPDEV && \ - swapon $SWAPDEV && \ - swapon --show - - name: Make testing directories to conform to testing standards - file: - path: "{{ item }}" - state: directory - with_items: - - /root/src/github.com/kubernetes-incubator - - /root/src/github.com/opencontainers - - /opt/cni/bin - - /etc/cni/net.d - - /usr/local/go - - name: install Golang upstream in CentOS - shell: | - curl -fsSL "https://golang.org/dl/go1.8.1.linux-amd64.tar.gz" \ - | tar -xzC /usr/local - when: ansible_distribution == 'CentOS' - - name: Set custom Golang path for CentOS - lineinfile: - dest: /root/.bashrc - line: 'export PATH=/usr/local/go/bin:$PATH' - insertafter: 'EOF' - regexp: 'export PATH=/usr/local/go/bin:$PATH' - state: present - when: ansible_distribution == 'CentOS' - - name: set sysctl vm.overcommit_memory=1 for CentOS - shell: | - sysctl -w vm.overcommit_memory=1 - when: ansible_distribution == 'CentOS' - - name: disable selinux on CentOS :( - shell: | - setenforce 0 - when: ansible_distribution == 'CentOS' - - name: git clone bats repo - git: - repo: https://github.com/sstephenson/bats.git - dest: /root/src/bats - async: 600 - poll: 10 - - name: Fetch the xunit feature PR for bats - shell: "git fetch origin +refs/pull/161/head:refs/remotes/origin/pr/161" - args: - chdir: /root/src/bats - 
async: 600 - poll: 10 - when: xunit - - name: Git checkout the xunit PR for bats - shell: "git checkout origin/pr/161" - args: - chdir: /root/src/bats - async: 600 - poll: 10 - when: xunit - - name: git clone crictl repo - git: - repo: https://github.com/kubernetes-incubator/cri-tools - dest: /root/src/github.com/kubernetes-incubator/cri-tools - version: 16e6fe4d7199c5689db4630a9330e6a8a12cecd1 - async: 600 - poll: 10 - - name: git clone runc repo - git: - repo: https://github.com/opencontainers/runc - dest: /root/src/github.com/opencontainers/runc - async: 600 - poll: 10 - - name: git clone cri-o repo - git: - repo: https://github.com/kubernetes-incubator/cri-o - dest: /root/src/github.com/kubernetes-incubator/cri-o - async: 600 - poll: 10 - - name: git clone cni repo - git: - repo: https://github.com/containernetworking/plugins - dest: /root/src/github.com/containernetworking/plugins - version: "{{ cni_commit }}" - - name: Git fetch the PR - shell: "git fetch origin +refs/pull/{{ pullrequest }}/head:refs/remotes/origin/pr/{{ pullrequest }}" - args: - chdir: /root/src/github.com/kubernetes-incubator/cri-o - tags: - - pr - async: 600 - poll: 10 - - name: Git checkout the commit into working branch - shell: "git checkout {{ commit }}" - args: - chdir: /root/src/github.com/kubernetes-incubator/cri-o - async: 600 - poll: 10 - - name: Install bats - command: bats/install.sh /usr/local - args: - chdir: /root/src - - name: Add go testing dir to bashrc files - lineinfile: - dest: /root/.bashrc - line: 'export GOPATH=/root' - insertafter: 'EOF' - regexp: 'export GOPATH=/root' - state: present - - name: Source the bashrc file - shell: source /root/.bashrc - - name: Build cni networking - shell: ./build.sh - args: - chdir: /root/src/github.com/containernetworking/plugins - - name: cp bin to cni bin dir - shell: cp /root/src/github.com/containernetworking/plugins/bin/* /opt/cni/bin - - name: curl crio bridge conf file for cni networking - get_url: - url: https://raw.githubusercontent.com/kubernetes-incubator/cri-o/{{ commit }}/contrib/cni/10-crio-bridge.conf - dest: /etc/cni/net.d/10-crio-bridge.conf - - name: curl loopback conf for cni networking - get_url: - url: https://raw.githubusercontent.com/kubernetes-incubator/cri-o/{{ commit }}/contrib/cni/99-loopback.conf - dest: /etc/cni/net.d/99-loopback.conf - - name: make clean - make: - target: clean - chdir: /root/src/github.com/opencontainers/runc - async: 600 - poll: 10 - - name: make crictl - shell: | - go install github.com/kubernetes-incubator/cri-tools/cmd/crictl && \ - cp $GOPATH/bin/crictl /usr/bin/crictl - args: - chdir: /root/src/github.com/kubernetes-incubator/cri-o/ - - name: make runc - make: - params: BUILDTAGS="seccomp selinux" - chdir: /root/src/github.com/opencontainers/runc - async: 600 - poll: 10 - - name: install runc - make: - target: install - chdir: /root/src/github.com/opencontainers/runc - async: 600 - poll: 10 - - name: Change test_runner.sh to use bats xunit output - lineinfile: - dest: /root/src/github.com/kubernetes-incubator/cri-o/test/test_runner.sh - line: 'execute time bats --tap --junit $TESTS' - regexp: 'execute time bats --tap \$TESTS' - state: present - when: xunit - - name: git clone cni test repo - git: - repo: https://github.com/runcom/plugins - dest: /root/src/github.com/containernetworking/plugins - version: "custom-bridge" - force: yes - - name: Build cni test networking - shell: ./build.sh - args: - chdir: /root/src/github.com/containernetworking/plugins - - name: cp custom-bridge to opt bin - shell: cp 
/root/src/github.com/containernetworking/plugins/bin/bridge /opt/cni/bin/bridge-custom - # k8s builds with go1.8.x, rhel, fedora don't have it yet - - name: install Golang upstream in Fedora/RHEL - shell: | - curl -fsSL "https://golang.org/dl/go1.8.3.linux-amd64.tar.gz" \ - | tar -xzC /usr/local - when: ansible_distribution == 'Fedora' or ansible_distribution == 'RedHat' - - name: Set custom Golang path for Fedora/RHEL - lineinfile: - dest: /root/.bashrc - line: 'export PATH=/usr/local/go/bin:$PATH' - insertafter: 'EOF' - regexp: 'export PATH=/usr/local/go/bin:$PATH' - state: present - when: ansible_distribution == 'Fedora' or ansible_distribution == 'RedHat' - - name: run integration tests RHEL - shell: 'CGROUP_MANAGER=cgroupfs STORAGE_OPTS="--storage-driver=overlay2 --storage-opt overlay2.override_kernel_check=1" make localintegration 2>&1 > testout.txt' - args: - chdir: /root/src/github.com/kubernetes-incubator/cri-o - async: 3600 - poll: 10 - ignore_errors: yes - when: ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS' - - name: run integration tests RHEL with xunit results - shell: 'CGROUP_MANAGER=cgroupfs STORAGE_OPTS="--storage-driver=overlay2 --storage-opt overlay2.override_kernel_check=1" make localintegration' - args: - chdir: /root/src/github.com/kubernetes-incubator/cri-o - async: 3600 - poll: 10 - ignore_errors: yes - when: (ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS') and xunit - - name: run integration tests Fedora - shell: 'CGROUP_MANAGER=cgroupfs STORAGE_OPTS="--storage-driver=overlay2" make localintegration 2>&1 > testout.txt' - args: - chdir: /root/src/github.com/kubernetes-incubator/cri-o - async: 3600 - poll: 10 - ignore_errors: yes - when: ansible_distribution == 'Fedora' - - name: run integration tests Fedora with xunit results - shell: 'CGROUP_MANAGER=cgroupfs STORAGE_OPTS="--storage-driver=overlay2" make localintegration' - args: - chdir: /root/src/github.com/kubernetes-incubator/cri-o - async: 3600 - poll: 10 - ignore_errors: yes - when: (ansible_distribution == 'Fedora' and xunit) - - name: Make testing output directory - file: - path: /root/src/github.com/kubernetes-incubator/cri-o/reports - state: directory - ignore_errors: yes - when: xunit - - name: Move all xunit files into one dir to scp - shell: 'mv /root/src/github.com/kubernetes-incubator/cri-o/test/TestReport-bats*.xml /root/src/github.com/kubernetes-incubator/cri-o/reports/' - when: xunit - # XXX: kube tests from now on - - name: git clone k8s repo - git: - repo: https://github.com/runcom/kubernetes - dest: /root/src/k8s.io/kubernetes - # based on kube upstream v1.7.4 - version: cri-o-node-e2e-patched - force: yes - async: 600 - poll: 10 - - name: make and install CRI-O - shell: | - make install.tools && \ - make && \ - make install && \ - make install.systemd && \ - make install.config - args: - chdir: /root/src/github.com/kubernetes-incubator/cri-o - async: 600 - poll: 10 - - name: link runc - file: src=/usr/local/sbin/runc dest=/usr/bin/runc state=link - - name: run with overlay2 - replace: - regexp: 'storage_driver = ""' - replace: 'storage_driver = "overlay2"' - name: /etc/crio/crio.conf - backup: yes - - name: run with systemd cgroup manager - replace: - regexp: 'cgroup_manager = "cgroupfs"' - replace: 'cgroup_manager = "systemd"' - name: /etc/crio/crio.conf - backup: yes - - name: add docker.io default registry - lineinfile: - dest: /etc/crio/crio.conf - line: '"docker.io"' - insertafter: 'registries = \[' - regexp: 'docker\.io' - state: present - - 
name: add overlay2 storage opts on RHEL/CentOS - lineinfile: - dest: /etc/crio/crio.conf - line: '"overlay2.override_kernel_check=1"' - insertafter: 'storage_option = \[' - regexp: 'overlay2\.override_kernel_check=1' - state: present - when: ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS' - - name: enable and start CRI-O - systemd: - name: crio - state: started - enabled: yes - daemon_reload: yes - async: 600 - poll: 10 - # see https://github.com/kubernetes-incubator/cri-o/issues/528 - - name: disable selinux for k8s conformance tests - shell: | - setenforce 0 - async: 600 - poll: 10 - - name: Go get the go-bindata file - shell: go get -u github.com/jteeuwen/go-bindata/go-bindata - args: - chdir: /root/src/k8s.io/kubernetes - async: 600 - poll: 10 - - name: Install etcd - command: hack/install-etcd.sh - args: - chdir: /root/src/k8s.io/kubernetes - async: 600 - poll: 10 - - name: Install necessary github go packages - shell: go get github.com/onsi/ginkgo/ginkgo ; go get github.com/onsi/gomega ; go get -u github.com/cloudflare/cfssl/cmd/... - args: - chdir: /root/src/k8s.io/kubernetes - async: 600 - poll: 10 - - name: Add path to bashrc files - lineinfile: - dest: /root/.bashrc - line: 'export PATH=$PATH:/root/src/k8s.io/kubernetes/third_party/etcd' - insertafter: 'EOF' - regexp: 'export PATH=\$PATH:/root/src/k8s.io/kubernetes/third_party/etcd' - state: present - - name: gather correct hostname - shell: | - cat /etc/hostname - register: hostname - - name: inject hostname into /etc/hosts - lineinfile: - dest: /etc/hosts - line: '127.0.0.1 {{ hostname.stdout }}' - insertafter: 'EOF' - regexp: '127\.0\.0\.1\s+{{ hostname.stdout }}' - state: present - - name: Flush the iptables - command: iptables -F - async: 600 - poll: 10 - - name: run k8s tests - shell: | - make test-e2e-node PARALLELISM=1 RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT=/var/run/crio.sock IMAGE_SERVICE_ENDPOINT=/var/run/crio.sock TEST_ARGS='--prepull-images=true --kubelet-flags="--cgroup-driver=systemd"' FOCUS="\[Conformance\]" 2>&1 > node-e2e.log - args: - chdir: /root/src/k8s.io/kubernetes - async: 7200 - poll: 10 - ignore_errors: true - # XXX: tests on RHEL/CentOS are unreliable fow now - when: ansible_distribution == 'Fedora' or ansible_distribution == 'RedHat' diff --git a/contrib/test/integration/README.md b/contrib/test/integration/README.md new file mode 100644 index 00000000..f13b8b92 --- /dev/null +++ b/contrib/test/integration/README.md @@ -0,0 +1,21 @@ +# Fedora and RHEL Integration and End-to-End Tests + +This directory contains playbooks to set up for and run the integration and +end-to-end tests for CRI-O on RHEL and Fedora hosts. 
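+
+Both playbooks are driven with `ansible-playbook` (or the repository's
+`contrib/test/venv-ansible-playbook.sh` wrapper); the entrypoints and tags
+referenced here are described below. A sketch of a typical run, assuming a
+hypothetical inventory file named `hosts`:
+
+```
+ansible-playbook -i hosts main.yml --tags setup,integration
+ansible-playbook -i hosts results.yml
+```
+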
Two entrypoints exist: + + - `main.yml`: sets up the machine and runs tests + - `results.yml`: gathers test output to `/tmp/artifacts` + +When running `main.yml`, three tags are present: + + - `setup`: run all tasks to set up the system for testing + - `e2e`: build CRI-O from source and run Kubernetes node E2Es + - `integration`: build CRI-O from source and run the local integration suite + +The playbooks assume the following things about your system: + + - on RHEL, the server and extras repos are configured and certs are present + - `ansible` is installed and the host is boot-strapped to allow `ansible` to run against it + - the `$GOPATH` is set and present for all shells (*e.g.* written in `/etc/environment`) + - CRI-O is checked out to the correct state at `${GOPATH}/src/github.com/kubernetes-incubator/cri-o` + - the user running the playbook has access to passwordless `sudo` \ No newline at end of file diff --git a/contrib/test/integration/ansible.cfg b/contrib/test/integration/ansible.cfg new file mode 100644 index 00000000..92a13a5f --- /dev/null +++ b/contrib/test/integration/ansible.cfg @@ -0,0 +1,359 @@ +# config file for ansible -- http://ansible.com/ +# ============================================== + +# nearly all parameters can be overridden in ansible-playbook +# or with command line flags. ansible will read ANSIBLE_CONFIG, +# ansible.cfg in the current working directory, .ansible.cfg in +# the home directory or /etc/ansible/ansible.cfg, whichever it +# finds first + +[defaults] + +# some basic default values... + +#inventory = inventory +#library = /usr/share/my_modules/ +#remote_tmp = $HOME/.ansible/tmp +#local_tmp = .ansible/tmp +#forks = 5 +forks = 10 +#poll_interval = 15 +#sudo_user = root +#ask_sudo_pass = True +ask_sudo_pass = False +#ask_pass = True +ask_pass = False +#transport = smart +#remote_port = 22 +#module_lang = C +#module_set_locale = True + +# plays will gather facts by default, which contain information about +# the remote system. +# +# smart - gather by default, but don't regather if already gathered +# implicit - gather by default, turn off with gather_facts: False +# explicit - do not gather by default, must say gather_facts: True +#gathering = implicit +gathering = smart + +# by default retrieve all facts subsets +# all - gather all subsets +# network - gather min and network facts +# hardware - gather hardware facts (longest facts to retrieve) +# virtual - gather min and virtual facts +# facter - import facts from facter +# ohai - import facts from ohai +# You can combine them using comma (ex: network,virtual) +# You can negate them using ! (ex: !hardware,!facter,!ohai) +# A minimal set of facts is always gathered. +gather_subset = network + +# additional paths to search for roles in, colon separated +# N/B: This depends on how ansible is called +#roles_path = $WORKSPACE/kommandir_workspace/roles + +# uncomment this to disable SSH key host checking +#host_key_checking = False +host_key_checking = False + +# change the default callback +#stdout_callback = skippy +# enable additional callbacks +#callback_whitelist = timer, mail + +# Determine whether includes in tasks and handlers are "static" by +# default. As of 2.0, includes are dynamic by default. Setting these +# values to True will make includes behave more like they did in the +# 1.x versions. 
+task_includes_static = True +handler_includes_static = True + +# change this for alternative sudo implementations +#sudo_exe = sudo + +# What flags to pass to sudo +# WARNING: leaving out the defaults might create unexpected behaviours +#sudo_flags = -H -S -n + +# SSH timeout +#timeout = 10 + +# default user to use for playbooks if user is not specified +# (/usr/bin/ansible will use current user as default) +#remote_user = root +remote_user = root + +# logging is off by default unless this path is defined +# if so defined, consider logrotate +log_path = $ARTIFACTS/main.log + +# default module name for /usr/bin/ansible +#module_name = command + +# use this shell for commands executed under sudo +# you may need to change this to bin/bash in rare instances +# if sudo is constrained +# executable = /bin/sh + +# if inventory variables overlap, does the higher precedence one win +# or are hash values merged together? The default is 'replace' but +# this can also be set to 'merge'. +hash_behaviour = replace + +# by default, variables from roles will be visible in the global variable +# scope. To prevent this, the following option can be enabled, and only +# tasks and handlers within the role will see the variables there +private_role_vars = False + +# list any Jinja2 extensions to enable here: +#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n + +# if set, always use this private key file for authentication, same as +# if passing --private-key to ansible or ansible-playbook +#private_key_file = /path/to/file + +# If set, configures the path to the Vault password file as an alternative to +# specifying --vault-password-file on the command line. +#vault_password_file = /path/to/vault_password_file + +# format of string {{ ansible_managed }} available within Jinja2 +# templates indicates to users editing templates files will be replaced. +# replacing {file}, {host} and {uid} and strftime codes with proper values. +#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} +# This short version is better used in templates as it won't flag the file as changed every run. +#ansible_managed = Ansible managed: {file} on {host} + +# by default, ansible-playbook will display "Skipping [host]" if it determines a task +# should not be run on a host. Set this to "False" if you don't want to see these "Skipping" +# messages. NOTE: the task header will still be shown regardless of whether or not the +# task is skipped. +#display_skipped_hosts = True +display_skipped_hosts = False + +# by default, if a task in a playbook does not include a name: field then +# ansible-playbook will construct a header that includes the task's action but +# not the task's args. This is a security feature because ansible cannot know +# if the *module* considers an argument to be no_log at the time that the +# header is printed. If your environment doesn't have a problem securing +# stdout from ansible-playbook (or you have manually specified no_log in your +# playbook on all of the tasks where you have secret information) then you can +# safely set this to True to get more informative messages. +display_args_to_stdout = False + +# by default (as of 1.3), Ansible will raise errors when attempting to dereference +# Jinja2 variables that are not set in templates or action lines. Uncomment this line +# to revert the behavior to pre-1.3. +#error_on_undefined_vars = False + +# by default (as of 1.6), Ansible may display warnings based on the configuration of the +# system running ansible itself. 
This may include warnings about 3rd party packages or +# other conditions that should be resolved if possible. +# to disable these warnings, set the following value to False: +system_warnings = False + +# by default (as of 1.4), Ansible may display deprecation warnings for language +# features that should no longer be used and will be removed in future versions. +# to disable these warnings, set the following value to False: +deprecation_warnings = False + +# (as of 1.8), Ansible can optionally warn when usage of the shell and +# command module appear to be simplified by using a default Ansible module +# instead. These warnings can be silenced by adjusting the following +# setting or adding warn=yes or warn=no to the end of the command line +# parameter string. This will for example suggest using the git module +# instead of shelling out to the git command. +command_warnings = False + + +# set plugin path directories here, separate with colons +#action_plugins = /usr/share/ansible/plugins/action +#callback_plugins = /usr/share/ansible/plugins/callback +#connection_plugins = /usr/share/ansible/plugins/connection +#lookup_plugins = /usr/share/ansible/plugins/lookup +#vars_plugins = /usr/share/ansible/plugins/vars +#filter_plugins = /usr/share/ansible/plugins/filter +#test_plugins = /usr/share/ansible/plugins/test +#strategy_plugins = /usr/share/ansible/plugins/strategy + +# Most callbacks shipped with Ansible are disabled by default +# and need to be whitelisted in your ansible.cfg file in order to function. +callback_whitelist = default + +# by default callbacks are not loaded for /bin/ansible, enable this if you +# want, for example, a notification or logging callback to also apply to +# /bin/ansible runs +#bin_ansible_callbacks = False + + +# don't like cows? that's unfortunate. +# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 +#nocows = 1 + +# set which cowsay stencil you'd like to use by default. When set to 'random', +# a random stencil will be selected for each task. The selection will be filtered +# against the `cow_whitelist` option below. +#cow_selection = default +#cow_selection = random + +# when using the 'random' option for cowsay, stencils will be restricted to this list. +# it should be formatted as a comma-separated list with no spaces between names. +# NOTE: line continuations here are for formatting purposes only, as the INI parser +# in python does not support them. +#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\ +# hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\ +# stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www + +# don't like colors either? +# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 +nocolor = 0 + +# if set to a persistent type (not 'memory', for example 'redis') fact values +# from previous runs in Ansible will be stored. This may be useful when +# wanting to use, for example, IP information from one group of servers +# without having to talk to them in the same playbook run to get their +# current IP information. 
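+# An illustrative, commented-out persistent setup (an example only; this
+# configuration keeps the in-memory default below):
+#fact_caching = redis
+#fact_caching_connection = localhost:6379:0
+#fact_caching_timeout = 7200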
+#fact_caching = memory + +# retry files +# When a playbook fails by default a .retry file will be created in ~/ +# You can disable this feature by setting retry_files_enabled to False +# and you can change the location of the files by setting retry_files_save_path + +#retry_files_enabled = False +retry_files_enabled = False + +# squash actions +# Ansible can optimise actions that call modules with list parameters +# when looping. Instead of calling the module once per with_ item, the +# module is called once with all items at once. Currently this only works +# under limited circumstances, and only with parameters named 'name'. +squash_actions = apk,apt,dnf,package,pacman,pkgng,yum,zypper + +# prevents logging of task data, off by default +#no_log = False + +# prevents logging of tasks, but only on the targets, data is still logged on the master/controller +no_target_syslog = True + +# controls whether Ansible will raise an error or warning if a task has no +# choice but to create world readable temporary files to execute a module on +# the remote machine. This option is False by default for security. Users may +# turn this on to have behaviour more like Ansible prior to 2.1.x. See +# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user +# for more secure ways to fix this than enabling this option. +#allow_world_readable_tmpfiles = False + +# controls the compression level of variables sent to +# worker processes. At the default of 0, no compression +# is used. This value must be an integer from 0 to 9. +#var_compression_level = 9 + +# controls what compression method is used for new-style ansible modules when +# they are sent to the remote system. The compression types depend on having +# support compiled into both the controller's python and the client's python. +# The names should match with the python Zipfile compression types: +# * ZIP_STORED (no compression. available everywhere) +# * ZIP_DEFLATED (uses zlib, the default) +# These values may be set per host via the ansible_module_compression inventory +# variable +#module_compression = 'ZIP_DEFLATED' + +# This controls the cutoff point (in bytes) on --diff for files +# set to 0 for unlimited (RAM may suffer!). +#max_diff_size = 1048576 + +[privilege_escalation] +#become=True +#become_method=sudo +#become_user=root +become_user=root +#become_ask_pass=False + +[paramiko_connection] + +# uncomment this line to cause the paramiko connection plugin to not record new host +# keys encountered. Increases performance on new host additions. Setting works independently of the +# host key checking setting above. +#record_host_keys=False + +# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this +# line to disable this behaviour. +#pty=False + +[ssh_connection] + +# ssh arguments to use +# Leaving off ControlPersist will result in poor performance, so use +# paramiko on older platforms rather than removing it +ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o PreferredAuthentications=publickey -o ConnectTimeout=13 + +# The path to use for the ControlPath sockets. This defaults to +# "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with +# very long hostnames or very long path names (caused by long user names or +# deeply nested home directories) this can exceed the character limit on +# file socket names (108 characters for most platforms). In that case, you +# may wish to shorten the string below. 
+# +# Example: +# control_path = %(directory)s/%%h-%%r +#control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r + +# Enabling pipelining reduces the number of SSH operations required to +# execute a module on the remote server. This can result in a significant +# performance improvement when enabled, however when using "sudo:" you must +# first disable 'requiretty' in /etc/sudoers +# +# By default, this option is disabled to preserve compatibility with +# sudoers configurations that have requiretty (the default on many distros). +# +#pipelining = False +pipelining=True + +# if True, make ansible use scp if the connection type is ssh +# (default is sftp) +#scp_if_ssh = True + +# if False, sftp will not use batch mode to transfer files. This may cause some +# types of file transfer failures impossible to catch however, and should +# only be disabled if your sftp version has problems with batch mode +#sftp_batch_mode = False + +[accelerate] +#accelerate_port = 5099 +#accelerate_timeout = 30 +#accelerate_connect_timeout = 5.0 + +# The daemon timeout is measured in minutes. This time is measured +# from the last activity to the accelerate daemon. +#accelerate_daemon_timeout = 30 + +# If set to yes, accelerate_multi_key will allow multiple +# private keys to be uploaded to it, though each user must +# have access to the system via SSH to add a new key. The default +# is "no". +#accelerate_multi_key = yes + +[selinux] +# file systems that require special treatment when dealing with security context +# the default behaviour that copies the existing context or uses the user default +# needs to be changed to use the file system dependent context. +#special_context_filesystems=nfs,vboxsf,fuse,ramfs + +# Set this to yes to allow libvirt_lxc connections to work without SELinux. +#libvirt_lxc_noseclabel = yes + +[colors] +#highlight = white +#verbose = blue +#warn = bright purple +#error = red +#debug = dark gray +#deprecate = purple +#skip = cyan +#unreachable = red +#ok = green +#changed = yellow +#diff_add = green +#diff_remove = red +#diff_lines = cyan diff --git a/contrib/test/integration/build/bats.yml b/contrib/test/integration/build/bats.yml new file mode 100644 index 00000000..d4ea19c6 --- /dev/null +++ b/contrib/test/integration/build/bats.yml @@ -0,0 +1,17 @@ +--- + +- name: clone bats source repo + git: + repo: "https://github.com/sstephenson/bats.git" + dest: "{{ ansible_env.GOPATH }}/src/github.com/sstephenson/bats" + +- name: install bats + command: "./install.sh /usr/local" + args: + chdir: "{{ ansible_env.GOPATH }}/src/github.com/sstephenson/bats" + +- name: link bats + file: + src: /usr/local/bin/bats + dest: /usr/bin/bats + state: link diff --git a/contrib/test/integration/build/cri-o.yml b/contrib/test/integration/build/cri-o.yml new file mode 100644 index 00000000..fa025035 --- /dev/null +++ b/contrib/test/integration/build/cri-o.yml @@ -0,0 +1,79 @@ +--- + +- name: stat the expected cri-o directory + stat: + path: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" + register: dir_stat + +- name: expect cri-o to be cloned already + fail: + msg: "Expected cri-o to be cloned at {{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o but it wasn't!" 
+ when: not dir_stat.stat.exists + +- name: install cri-o tools + make: + target: install.tools + chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" + +- name: build cri-o + make: + chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" + +- name: install cri-o + make: + target: install + chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" + +- name: install cri-o systemd files + make: + target: install.systemd + chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" + +- name: install cri-o config + make: + target: install.config + chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" + +- name: install configs + copy: + src: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o/{{ item.src }}" + dest: "{{ item.dest }}" + remote_src: yes + with_items: + - src: contrib/cni/10-crio-bridge.conf + dest: /etc/cni/net.d/10-crio-bridge.conf + - src: contrib/cni/99-loopback.conf + dest: /etc/cni/net.d/99-loopback.conf + - src: test/redhat_sigstore.yaml + dest: /etc/containers/registries.d/registry.access.redhat.com.yaml + +- name: run with overlay + replace: + regexp: 'storage_driver = ""' + replace: 'storage_driver = "overlay"' + name: /etc/crio/crio.conf + backup: yes + +- name: run with systemd cgroup manager + replace: + regexp: 'cgroup_manager = "cgroupfs"' + replace: 'cgroup_manager = "systemd"' + name: /etc/crio/crio.conf + backup: yes + +- name: add docker.io default registry + lineinfile: + dest: /etc/crio/crio.conf + line: '"docker.io"' + insertafter: 'registries = \[' + regexp: 'docker\.io' + state: present + +- name: add overlay storage opts on RHEL/CentOS + lineinfile: + dest: /etc/crio/crio.conf + line: '"overlay.override_kernel_check=1"' + insertafter: 'storage_option = \[' + regexp: 'overlay\.override_kernel_check=1' + state: present + when: ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS' diff --git a/contrib/test/integration/build/cri-tools.yml b/contrib/test/integration/build/cri-tools.yml new file mode 100644 index 00000000..3d30824f --- /dev/null +++ b/contrib/test/integration/build/cri-tools.yml @@ -0,0 +1,26 @@ +--- + +- name: clone cri-tools source repo + git: + repo: "https://github.com/kubernetes-incubator/cri-tools.git" + dest: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-tools" + version: "{{ cri_tools_git_version }}" + force: "{{ force_clone | default(False) | bool}}" + +- name: install crictl + command: "/usr/bin/go install github.com/kubernetes-incubator/cri-tools/cmd/crictl" + +- name: install critest + command: "/usr/bin/go install github.com/kubernetes-incubator/cri-tools/cmd/critest" + +- name: link crictl + file: + src: "{{ ansible_env.GOPATH }}/bin/crictl" + dest: /usr/bin/crictl + state: link + +- name: link critest + file: + src: "{{ ansible_env.GOPATH }}/bin/critest" + dest: /usr/bin/critest + state: link diff --git a/contrib/test/integration/build/kubernetes.yml b/contrib/test/integration/build/kubernetes.yml new file mode 100644 index 00000000..63d907f1 --- /dev/null +++ b/contrib/test/integration/build/kubernetes.yml @@ -0,0 +1,67 @@ +--- + +- name: clone kubernetes source repo + git: + repo: "https://github.com/{{ k8s_github_fork }}/kubernetes.git" + dest: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes" + # based on kube v1.9.0-alpha.2, update as needed + version: "{{ k8s_git_version }}" + force: "{{ force_clone | default(False) | bool}}" + +- name: install etcd + command: 
"hack/install-etcd.sh" + args: + chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes" + +- name: build kubernetes + make: + chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes" + +- name: Add custom cluster service file for the e2e testing + copy: + dest: /etc/systemd/system/customcluster.service + content: | + [Unit] + After=network-online.target + Wants=network-online.target + [Service] + WorkingDirectory={{ ansible_env.GOPATH }}/src/k8s.io/kubernetes + ExecStart=/usr/local/bin/createcluster.sh + User=root + [Install] + WantedBy=multi-user.target + +- name: Add create cluster background script for e2e testing + copy: + dest: /usr/local/bin/createcluster.sh + content: | + #!/bin/bash + + export PATH=/usr/local/go/bin:/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/root/bin:{{ ansible_env.GOPATH }}/bin:{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/third_party/etcd:{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/_output/local/bin/linux/amd64/ + export CONTAINER_RUNTIME=remote + export CGROUP_DRIVER=systemd + export CONTAINER_RUNTIME_ENDPOINT='{{ crio_socket }} --runtime-request-timeout=5m' + export ALLOW_SECURITY_CONTEXT="," + export ALLOW_PRIVILEGED=1 + export DNS_SERVER_IP={{ ansible_default_ipv4.address }} + export API_HOST={{ ansible_default_ipv4.address }} + export API_HOST_IP={{ ansible_default_ipv4.address }} + export KUBE_ENABLE_CLUSTER_DNS=true + export ENABLE_HOSTPATH_PROVISIONER=true + export KUBE_ENABLE_CLUSTER_DASHBOARD=true + ./hack/local-up-cluster.sh + mode: "u=rwx,g=rwx,o=x" + +- name: Set kubernetes_provider to be local + lineinfile: + dest: /etc/environment + line: 'KUBERNETES_PROVIDER=local' + regexp: 'KUBERNETES_PROVIDER=' + state: present + +- name: Set KUBECONFIG + lineinfile: + dest: /etc/environment + line: 'KUBECONFIG=/var/run/kubernetes/admin.kubeconfig' + regexp: 'KUBECONFIG=' + state: present diff --git a/contrib/test/integration/build/plugins.yml b/contrib/test/integration/build/plugins.yml new file mode 100644 index 00000000..e342a0b9 --- /dev/null +++ b/contrib/test/integration/build/plugins.yml @@ -0,0 +1,50 @@ +--- + +- name: clone plugins source repo + git: + repo: "https://github.com/containernetworking/plugins.git" + dest: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins" + version: "dcf7368eeab15e2affc6256f0bb1e84dd46a34de" + +- name: build plugins + command: "./build.sh" + args: + chdir: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins" + +- name: install plugins + copy: + src: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins/bin/{{ item }}" + dest: "/opt/cni/bin" + mode: "o=rwx,g=rx,o=rx" + remote_src: yes + with_items: + - bridge + - dhcp + - flannel + - host-local + - ipvlan + - loopback + - macvlan + - ptp + - sample + - tuning + - vlan + +- name: clone runcom plugins source repo + git: + repo: "https://github.com/runcom/plugins.git" + dest: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins" + version: "custom-bridge" + force: yes + +- name: build plugins + command: "./build.sh" + args: + chdir: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins" + +- name: install custom bridge + copy: + src: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins/bin/bridge" + dest: "/opt/cni/bin/bridge-custom" + mode: "o=rwx,g=rx,o=rx" + remote_src: yes diff --git a/contrib/test/integration/build/runc.yml b/contrib/test/integration/build/runc.yml new file mode 100644 index 00000000..f3221f4a --- /dev/null +++ 
b/contrib/test/integration/build/runc.yml
@@ -0,0 +1,23 @@
+---
+
+- name: clone runc source repo
+  git:
+    repo: "https://github.com/opencontainers/runc.git"
+    dest: "{{ ansible_env.GOPATH }}/src/github.com/opencontainers/runc"
+    version: "c6e4a1ebeb1a72b529c6f1b6ee2b1ae5b868b14f"
+
+- name: build runc
+  make:
+    params: BUILDTAGS="seccomp selinux"
+    chdir: "{{ ansible_env.GOPATH }}/src/github.com/opencontainers/runc"
+
+- name: install runc
+  make:
+    target: "install"
+    chdir: "{{ ansible_env.GOPATH }}/src/github.com/opencontainers/runc"
+
+- name: link runc
+  file:
+    src: /usr/local/sbin/runc
+    dest: /usr/bin/runc
+    state: link
diff --git a/contrib/test/integration/callback_plugins/default.py b/contrib/test/integration/callback_plugins/default.py
new file mode 100644
index 00000000..99821f66
--- /dev/null
+++ b/contrib/test/integration/callback_plugins/default.py
@@ -0,0 +1,156 @@
+'''Plugin to override the default output logic.'''
+
+# upstream: https://gist.github.com/cliffano/9868180
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import imp
+import os
+
+# Constants and color helpers used by the v2_* overrides below.
+from ansible import constants as C
+from ansible.utils.color import colorize, hostcolor
+
+# Load the stock 'default' callback module directly from the installed
+# Ansible so that it can be subclassed below.
+ANSIBLE_PATH = imp.find_module('ansible')[1]
+DEFAULT_PATH = os.path.join(ANSIBLE_PATH, 'plugins/callback/default.py')
+DEFAULT_MODULE = imp.load_source(
+    'ansible.plugins.callback.default',
+    DEFAULT_PATH
+)
+
+try:
+    from ansible.plugins.callback import CallbackBase
+    BASECLASS = CallbackBase
+except ImportError:  # < ansible 2.1
+    BASECLASS = DEFAULT_MODULE.CallbackModule
+
+
+class CallbackModule(DEFAULT_MODULE.CallbackModule):  # pylint: disable=too-few-public-methods,no-init
+    '''
+    Override for the default callback module.
+
+    Render std err/out outside of the rest of the result which it prints with
+    indentation.
+    '''
+    CALLBACK_VERSION = 2.0
+    CALLBACK_TYPE = 'stdout'
+    CALLBACK_NAME = 'default'
+
+    def __init__(self, *args, **kwargs):
+        # pylint: disable=non-parent-init-called
+        BASECLASS.__init__(self, *args, **kwargs)
+        self.failed_task = []
+        self.result_file = os.environ.get('AHT_RESULT_FILE')
+
+    def _dump_results(self, result):
+        '''Return the text to output for a result.'''
+        result['_ansible_verbose_always'] = True
+
+        save = {}
+        for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg']:
+            if key in result:
+                save[key] = result.pop(key)
+
+        output = BASECLASS._dump_results(self, result)  # pylint: disable=protected-access
+
+        for key in ['stdout', 'stderr', 'msg']:
+            if key in save and save[key]:
+                output += '\n\n%s:\n---\n%s\n---' % (key.upper(), save[key])
+
+        for key, value in save.items():
+            result[key] = value
+
+        return output
+
+    def v2_runner_on_unreachable(self, result):
+        self.failed_task = result
+
+        if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+            self._print_task_banner(result._task)
+
+        delegated_vars = result._result.get('_ansible_delegated_vars', None)
+        if delegated_vars:
+            self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
+        else:
+            self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
+
+    def v2_runner_on_failed(self, result, ignore_errors=False):
+        if ignore_errors is not True:
+            # Sets environment variable for test failures for use in playbooks.
+            # Handlers tasks can conditionalize themselves using this variable
+            # to run only on failure.
+            os.environ["AHT_FAILURE"] = "1"
+
+            # Save last failure
+            self.failed_task = result
+
+        if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+            self._print_task_banner(result._task)
+
+        delegated_vars = result._result.get('_ansible_delegated_vars', None)
+        if 'exception' in result._result:
+            if self._display.verbosity < 3:
+                # extract just the actual error message from the exception text
+                error = result._result['exception'].strip().split('\n')[-1]
+                msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+            else:
+                msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
+
+            self._display.display(msg, color=C.COLOR_ERROR)
+
+        if result._task.loop and 'results' in result._result:
+            self._process_items(result)
+
+        else:
+            if delegated_vars:
+                self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
+            else:
+                self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
+
+        if ignore_errors:
+            self._display.display("...ignoring", color=C.COLOR_SKIP)
+
+    def v2_playbook_on_stats(self, stats):
+        self._display.banner("PLAY RECAP")
+
+        hosts = sorted(stats.processed.keys())
+        for h in hosts:
+            t = stats.summarize(h)
+
+            self._display.display(u"%s : %s %s %s %s" % (
+                hostcolor(h, t),
+                colorize(u'ok', t['ok'], C.COLOR_OK),
+                colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+                colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+                colorize(u'failed', t['failures'], C.COLOR_ERROR)),
+                screen_only=True
+            )
+
+            self._display.display(u"%s : %s %s %s %s" % (
+                hostcolor(h, t, False),
+                colorize(u'ok', t['ok'], None),
+                colorize(u'changed', t['changed'], None),
+                colorize(u'unreachable', t['unreachable'], None),
+                colorize(u'failed', t['failures'], None)),
+                log_only=True
+            )
+
+        self._display.display("", screen_only=True)
+        # Save result to file if environment variable exists
+        if self.result_file is not None:
+            if self.failed_task:
+                with open(self.result_file, 'w') as f:
+                    f.write("PLAY: %s\n%s\n%s" % (self._play, \
+                            self.failed_task._task, \
+                            self._dump_results(self.failed_task._result)))
+            else:
+                open(self.result_file, 'w').close()
diff --git a/contrib/test/integration/critest.yml b/contrib/test/integration/critest.yml
new file mode 100644
index 00000000..377ab59d
--- /dev/null
+++ b/contrib/test/integration/critest.yml
@@ -0,0 +1,45 @@
+---
+
+- name: enable and start CRI-O
+  systemd:
+    name: crio
+    state: started
+    enabled: yes
+    daemon_reload: yes
+
+- name: Flush the iptables
+  command: iptables -F
+
+- name: Enable localnet routing
+  command: sysctl -w net.ipv4.conf.all.route_localnet=1
+
+- name: Add masquerade for localhost
+  command: iptables -t nat -I POSTROUTING -s 127.0.0.1 !
-d 127.0.0.1 -j MASQUERADE
+
+- name: run critest validation
+  shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock v"
+  args:
+    chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
+  async: 5400
+  poll: 30
+  when: ansible_distribution not in ['RedHat', 'CentOS']
+
+  # XXX: RHEL has an additional test which fails because of selinux but disabling
+  # it doesn't solve the issue.
+  # TODO(runcom): enable skipped tests once we fix them (selinux)
+  # https://bugzilla.redhat.com/show_bug.cgi?id=1414236
+  # https://access.redhat.com/solutions/2897781
+- name: run critest validation
+  shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock -s 'should not allow privilege escalation when true' v"
+  args:
+    chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
+  async: 5400
+  poll: 30
+  when: ansible_distribution in ['RedHat', 'CentOS']
+
+- name: run critest benchmarks
+  shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock b"
+  args:
+    chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
+  async: 5400
+  poll: 30
diff --git a/contrib/test/integration/e2e.yml b/contrib/test/integration/e2e.yml
new file mode 100644
index 00000000..17982d1d
--- /dev/null
+++ b/contrib/test/integration/e2e.yml
@@ -0,0 +1,58 @@
+---
+
+- name: enable and start CRI-O
+  systemd:
+    name: crio
+    state: started
+    enabled: yes
+    daemon_reload: yes
+
+- name: update the server address for the custom cluster
+  lineinfile:
+    dest: /usr/local/bin/createcluster.sh
+    line: "export {{ item }}={{ ansible_default_ipv4.address }}"
+    regexp: "^export {{ item }}="
+    state: present
+  with_items:
+    - DNS_SERVER_IP
+    - API_HOST
+    - API_HOST_IP
+
+- name: enable and start the custom cluster
+  systemd:
+    name: customcluster.service
+    state: started
+    enabled: yes
+    daemon_reload: yes
+
+- name: wait for the cluster to be running
+  command: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/_output/bin/kubectl get service kubernetes --namespace default"
+  register: kube_poll
+  until: kube_poll | succeeded
+  retries: 100
+  delay: 30
+
+- name: ensure directory exists for e2e reports
+  file:
+    path: "{{ artifacts }}"
+    state: directory
+
+# TODO remove the last test skipped once https://github.com/kubernetes-incubator/cri-o/pull/1217 is merged
+- name: Buffer the e2e testing command to workaround Ansible YAML folding "feature"
+  set_fact:
+    e2e_shell_cmd: >
+        /usr/bin/go run hack/e2e.go
+        --test
+        --test_args="-host=https://{{ ansible_default_ipv4.address }}:6443
+        --ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|PersistentVolumes|\[HPA\]|should.support.building.a.client.with.a.CSR|should.support.inline.execution.and.attach
+        --report-dir={{ artifacts }}"
+        &> {{ artifacts }}/e2e.log
+        # Fix vim syntax highlighting: "
+
+- name: disable SELinux
+  command: setenforce 0
+
+- name: run e2e tests
+  shell: "{{ e2e_shell_cmd | regex_replace('\\s+', ' ') }}"
+  args:
+    chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
diff --git a/contrib/test/integration/golang.yml b/contrib/test/integration/golang.yml
new file mode 100644
index 00000000..38f3d78e
--- /dev/null
+++ b/contrib/test/integration/golang.yml
@@ -0,0 +1,55 @@
+---
+
+- name: ensure Golang dir is empty first
+  file:
+    path: /usr/local/go
+    state: absent
+
+- name: fetch Golang
+  unarchive:
+    remote_src: yes
+    src: "https://storage.googleapis.com/golang/go{{ version }}.linux-amd64.tar.gz"
+    dest: /usr/local
+
+- name: link go toolchain
+  file:
+    src: "/usr/local/go/bin/{{ item }}"
+    dest: "/usr/bin/{{ item }}"
+    state: link
+  with_items:
+    - go
+    - gofmt
+    - godoc
+
+- name: ensure user profile exists
+  file:
+    path: "{{ ansible_user_dir }}/.profile"
+    state: touch
+
+- name: set up PATH for Go toolchain and built binaries
+  lineinfile:
+    dest: "{{ ansible_user_dir }}/.profile"
+    line: 'PATH={{ ansible_env.PATH }}:{{ ansible_env.GOPATH }}/bin:/usr/local/go/bin'
+    regexp: '^PATH='
+    state: present
+
+- name: set up directories
+  file:
+    path: "{{ item }}"
+    state: directory
+  with_items:
+    - "{{ ansible_env.GOPATH }}/src/github.com/containernetworking"
+    - "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator"
+    - "{{ ansible_env.GOPATH }}/src/k8s.io"
+    - "{{ ansible_env.GOPATH }}/src/github.com/sstephenson"
+    - "{{ ansible_env.GOPATH }}/src/github.com/opencontainers"
+
+- name: install Go tools and dependencies
+  shell: /usr/bin/go get -u "github.com/{{ item }}"
+  with_items:
+    - tools/godep
+    - onsi/ginkgo/ginkgo
+    - onsi/gomega
+    - cloudflare/cfssl/cmd/...
+    - jteeuwen/go-bindata/go-bindata
+    - cpuguy83/go-md2man
diff --git a/contrib/test/integration/main.yml b/contrib/test/integration/main.yml
new file mode 100644
index 00000000..1f6448f5
--- /dev/null
+++ b/contrib/test/integration/main.yml
@@ -0,0 +1,125 @@
+- hosts: all
+  remote_user: root
+  vars_files:
+    - "{{ playbook_dir }}/vars.yml"
+  tags:
+    - setup
+  tasks:
+    - name: set up the system
+      include: system.yml
+
+    - name: install Golang tools
+      include: golang.yml
+      vars:
+        version: "1.8.5"
+
+    - name: clone build and install bats
+      include: "build/bats.yml"
+
+    - name: clone build and install cri-tools
+      include: "build/cri-tools.yml"
+      vars:
+        cri_tools_git_version: "b42fc3f364dd48f649d55926c34492beeb9b2e99"
+
+    - name: clone build and install kubernetes
+      include: "build/kubernetes.yml"
+      vars:
+        k8s_git_version: "cri-o-node-e2e-patched-logs"
+        k8s_github_fork: "runcom"
+        crio_socket: "/var/run/crio.sock"
+
+    - name: clone build and install runc
+      include: "build/runc.yml"
+
+    - name: clone build and install networking plugins
+      include: "build/plugins.yml"
+
+- hosts: all
+  remote_user: root
+  vars_files:
+    - "{{ playbook_dir }}/vars.yml"
+  tags:
+    - integration
+    - e2e
+    - node-e2e
+    - critest
+  tasks:
+    - name: clone build and install cri-o
+      include: "build/cri-o.yml"
+
+- hosts: all
+  remote_user: root
+  vars_files:
+    - "{{ playbook_dir }}/vars.yml"
+  tags:
+    - integration
+  tasks:
+    - name: clone build and install cri-tools
+      include: "build/cri-tools.yml"
+      vars:
+        force_clone: True
+        cri_tools_git_version: "a9e38a4a000bc1a4052fb33de1c967b8cfe9ad40"
+    - name: run cri-o integration tests
+      include: test.yml
+
+- hosts: all
+  remote_user: root
+  vars_files:
+    - "{{ playbook_dir }}/vars.yml"
+  tags:
+    - critest
+  tasks:
+    - name: install Golang tools
+      include: golang.yml
+      vars:
+        version: "1.9.2"
+    - name: setup critest
+      include: "build/cri-tools.yml"
+      vars:
+        force_clone: True
+        cri_tools_git_version: "a9e38a4a000bc1a4052fb33de1c967b8cfe9ad40"
+    - name: run critest validation and benchmarks
+      include: critest.yml
+
+- hosts: all
+  remote_user: root
+  vars_files:
+    - "{{ playbook_dir }}/vars.yml"
+  tags:
+    - node-e2e
+  tasks:
+    - name: install Golang tools
+      include: golang.yml
+      vars:
+        version: "1.9.2"
+    - name: clone build and install kubernetes
+      include: "build/kubernetes.yml"
+      vars:
+        force_clone: True
+        k8s_git_version: "master"
+        k8s_github_fork: "kubernetes"
+        crio_socket: "/var/run/crio/crio.sock"
+    - name: run k8s node-e2e tests
+      include: node-e2e.yml
+
+- hosts: all
+  remote_user: root
+  vars_files:
+    - "{{ playbook_dir }}/vars.yml"
+  tags:
+    - e2e
+  tasks:
+    - name: install Golang tools
+      include: golang.yml
+      vars:
+        version: "1.9.2"
+    - name: clone build and install kubernetes
+      include: "build/kubernetes.yml"
+      vars:
+        force_clone: True
+        # master as of 12/11/2017
+        k8s_git_version: "master-nfs-fix"
+        k8s_github_fork: "runcom"
+        crio_socket: "/var/run/crio/crio.sock"
+    - name: run k8s e2e tests
+      include: e2e.yml
diff --git a/contrib/test/integration/node-e2e.yml b/contrib/test/integration/node-e2e.yml
new file mode 100644
index 00000000..6ea8ac2c
--- /dev/null
+++ b/contrib/test/integration/node-e2e.yml
@@ -0,0 +1,26 @@
+---
+
+- name: enable and start CRI-O
+  systemd:
+    name: crio
+    state: started
+    enabled: yes
+    daemon_reload: yes
+
+- name: disable SELinux
+  command: setenforce 0
+
+- name: Flush the iptables
+  command: iptables -F
+
+- name: run node-e2e tests
+  shell: |
+    # parametrize crio socket
+    # cgroup-driver???
+    # TODO(runcom): remove conformance focus, we want everything for testgrid
+    make test-e2e-node PARALLELISM=1 RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT=/var/run/crio/crio.sock IMAGE_SERVICE_ENDPOINT=/var/run/crio/crio.sock TEST_ARGS='--prepull-images=true --kubelet-flags="--cgroup-driver=systemd"' FOCUS="\[Conformance\]" &> {{ artifacts }}/node-e2e.log
+  args:
+    chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
+  async: 7200
+  poll: 10
+  ignore_errors: true
diff --git a/contrib/test/integration/results.yml b/contrib/test/integration/results.yml
new file mode 100644
index 00000000..c9a96abb
--- /dev/null
+++ b/contrib/test/integration/results.yml
@@ -0,0 +1,62 @@
+---
+# vim-syntax: ansible
+
+- hosts: '{{ hosts | default("all") }}'
+  vars_files:
+    - "{{ playbook_dir }}/vars.yml"
+  vars:
+    _result_filepaths: []  # do not use
+    _dstfnbuff: []  # do not use
+  tasks:
+    - name: The crio_integration_filepath is required
+      tags:
+        - integration
+      set_fact:
+        _result_filepaths: "{{ _result_filepaths + [crio_integration_filepath] }}"
+
+    - name: The crio_node_e2e_filepath is required
+      tags:
+        - e2e
+      set_fact:
+        _result_filepaths: "{{ _result_filepaths + [crio_node_e2e_filepath] }}"
+
+    - name: Verify expectations
+      assert:
+        that:
+          - 'result_dest_basedir | default(False, True)'
+          - '_result_filepaths | default(False, True)'
+          - '_dstfnbuff == []'
+          - 'results_fetched is undefined'
+
+    - name: Results directory exists
+      file:
+        path: "{{ result_dest_basedir }}"
+        state: directory
+      delegate_to: localhost
+
+    - name: destination file paths are buffered for overwrite-checking and jUnit conversion
+      set_fact:
+        _dstfnbuff: >
+            {{ _dstfnbuff |
+               union( [result_dest_basedir ~ "/" ~ inventory_hostname ~ "/" ~ item | basename] ) }}
+      with_items: '{{
_result_filepaths }}' diff --git a/contrib/test/integration/system.yml b/contrib/test/integration/system.yml new file mode 100644 index 00000000..453551fa --- /dev/null +++ b/contrib/test/integration/system.yml @@ -0,0 +1,134 @@ +--- + +- name: Make sure we have all required packages + package: + name: "{{ item }}" + state: present + with_items: + - atomic-registries + - container-selinux + - curl + - device-mapper-devel + - expect + - findutils + - gcc + - git + - glib2-devel + - glibc-devel + - glibc-static + - gpgme-devel + - hostname + - iproute + - iptables + - krb5-workstation + - libassuan-devel + - libffi-devel + - libgpg-error-devel + - libguestfs-tools + - libseccomp-devel + - libvirt-client + - libvirt-python + - libxml2-devel + - libxslt-devel + - make + - mlocate + - nfs-utils + - nmap-ncat + - oci-register-machine + - oci-systemd-hook + - oci-umount + - openssl + - openssl-devel + - ostree-devel + - pkgconfig + - python + - python2-crypto + - python-devel + - python-rhsm-certificates + - python-virtualenv + - PyYAML + - redhat-rpm-config + - rpcbind + - rsync + - sed + - skopeo-containers + - socat + - tar + - wget + async: 600 + poll: 10 + +- name: Add python2-boto for Fedora + package: + name: "{{ item }}" + state: present + with_items: + - python2-boto + when: ansible_distribution in ['Fedora'] + +- name: Add python-boto for RHEL and CentOS + package: + name: "{{ item }}" + state: present + with_items: + - python-boto + when: ansible_distribution in ['RedHat', 'CentOS'] + +- name: Add Btrfs for Fedora + package: + name: "{{ item }}" + state: present + with_items: + - btrfs-progs-devel + when: ansible_distribution in ['Fedora'] + +- name: Update all packages + package: + name: '*' + state: latest + async: 600 + poll: 10 + +- name: Setup swap to prevent kernel firing off the OOM killer + shell: | + truncate -s 8G /root/swap && \ + export SWAPDEV=$(losetup --show -f /root/swap | head -1) && \ + mkswap $SWAPDEV && \ + swapon $SWAPDEV && \ + swapon --show + +- name: ensure directories exist as needed + file: + path: "{{ item }}" + state: directory + with_items: + - /opt/cni/bin + - /etc/cni/net.d + +- name: set sysctl vm.overcommit_memory=1 for CentOS + sysctl: + name: vm.overcommit_memory + state: present + value: 1 + when: ansible_distribution == 'CentOS' + +- name: inject hostname into /etc/hosts + lineinfile: + dest: /etc/hosts + line: '{{ ansible_default_ipv4.address }} {{ ansible_nodename }}' + insertafter: 'EOF' + regexp: '{{ ansible_default_ipv4.address }}\s+{{ ansible_nodename }}' + state: present + +- name: Flush the iptables + command: iptables -F + +- name: Enable localnet routing + command: sysctl -w net.ipv4.conf.all.route_localnet=1 + +- name: Add masquerade for localhost + command: iptables -t nat -I POSTROUTING -s 127.0.0.1 ! 
-d 127.0.0.1 -j MASQUERADE + +- name: Update the kernel cmdline to include quota support + command: grubby --update-kernel=ALL --args="rootflags=pquota" + when: ansible_distribution in ['RedHat', 'CentOS'] diff --git a/contrib/test/integration/test.yml b/contrib/test/integration/test.yml new file mode 100644 index 00000000..418ceff7 --- /dev/null +++ b/contrib/test/integration/test.yml @@ -0,0 +1,25 @@ +--- + +- name: Make testing output verbose so it can be converted to xunit + lineinfile: + dest: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/hack/make-rules/test.sh" + line: ' go test -v "${goflags[@]:+${goflags[@]}}" \' + regexp: ' go test \"\$' + state: present + +- name: set extra storage options + set_fact: + extra_storage_opts: " --storage-opt overlay.override_kernel_check=1" + when: ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS' + +- name: ensure directory exists for e2e reports + file: + path: "{{ artifacts }}" + state: directory + +- name: run integration tests + shell: "CGROUP_MANAGER=cgroupfs STORAGE_OPTIONS='--storage-driver=overlay{{ extra_storage_opts | default('') }}' make localintegration >& {{ artifacts }}/testout.txt" + args: + chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" + async: 5400 + poll: 30 diff --git a/contrib/test/integration/vars.yml b/contrib/test/integration/vars.yml new file mode 100644 index 00000000..f1e5e2f7 --- /dev/null +++ b/contrib/test/integration/vars.yml @@ -0,0 +1,8 @@ +--- + +# For results.yml Paths use rsync 'source' conventions +artifacts: "/tmp/artifacts" # Base-directory for collection +crio_integration_filepath: "{{ artifacts }}/testout.txt" +crio_node_e2e_filepath: "{{ artifacts }}/junit_01.xml" +result_dest_basedir: '{{ lookup("env","WORKSPACE") | + default(playbook_dir, True) }}/artifacts' diff --git a/contrib/test/venv-ansible-playbook.sh b/contrib/test/venv-ansible-playbook.sh index 06acab35..58704215 100755 --- a/contrib/test/venv-ansible-playbook.sh +++ b/contrib/test/venv-ansible-playbook.sh @@ -46,54 +46,61 @@ else trap 'rm -rf "$PIPCACHE"' EXIT fi +# Create a directory to contain logs and test artifacts +export ARTIFACTS=$(mkdir -pv $WORKSPACE/artifacts | tail -1 | cut -d \' -f 2) +[ -d "$ARTIFACTS" ] || exit 3 + # All command failures from now on are fatal set -e echo echo "Bootstrapping trusted virtual environment, this may take a few minutes, depending on networking." 
-echo "(logs: \"$WORKSPACE/crio_venv_setup_log.txt\")" +echo "(logs: \"$ARTIFACTS/crio_venv_setup_log.txt\")" echo + ( set -x cd "$WORKSPACE" - # N/B: local system's virtualenv binary - uncontrolled version fixed below - virtualenv --no-site-packages --python=python2.7 ./.venvbootstrap - # Set up paths to install/operate out of $WORKSPACE/.venvbootstrap - source ./.venvbootstrap/bin/activate - # N/B: local system's pip binary - uncontrolled version fixed below - # pip may not support --cache-dir, force it's location into $WORKSPACE the ugly-way - OLD_HOME="$HOME" - export HOME="$WORKSPACE" - export PIPCACHE="$WORKSPACE/.cache/pip" - pip install --force-reinstall --upgrade pip==9.0.1 - # Undo --cache-dir workaround - export HOME="$OLD_HOME" - # Install fixed, trusted, hashed versions of all requirements (including pip and virtualenv) - pip --cache-dir="$PIPCACHE" install --require-hashes \ - --requirement "$SCRIPT_PATH/requirements.txt" - - # Setup trusted virtualenv using hashed binary from requirements.txt - ./.venvbootstrap/bin/virtualenv --no-site-packages --python=python2.7 ./.cri-o_venv - # Exit untrusted virtualenv - deactivate + # When running more than once, make it fast by skipping the bootstrap + if [ ! -d "./.cri-o_venv" ]; then + # N/B: local system's virtualenv binary - uncontrolled version fixed below + virtualenv --no-site-packages --python=python2.7 ./.venvbootstrap + # Set up paths to install/operate out of $WORKSPACE/.venvbootstrap + source ./.venvbootstrap/bin/activate + # N/B: local system's pip binary - uncontrolled version fixed below + # pip may not support --cache-dir, force it's location into $WORKSPACE the ugly-way + OLD_HOME="$HOME" + export HOME="$WORKSPACE" + export PIPCACHE="$WORKSPACE/.cache/pip" + pip install --force-reinstall --upgrade pip==9.0.1 + # Undo --cache-dir workaround + export HOME="$OLD_HOME" + # Install fixed, trusted, hashed versions of all requirements (including pip and virtualenv) + pip --cache-dir="$PIPCACHE" install --require-hashes \ + --requirement "$SCRIPT_PATH/requirements.txt" + # Setup trusted virtualenv using hashed binary from requirements.txt + ./.venvbootstrap/bin/virtualenv --no-site-packages --python=python2.7 ./.cri-o_venv + # Exit untrusted virtualenv + deactivate + fi # Enter trusted virtualenv source ./.cri-o_venv/bin/activate - # Re-install from cache - pip install --force-reinstall --upgrade pip==9.0.1 + # Upgrade stock-pip to support hashes + pip install --force-reinstall --cache-dir="$PIPCACHE" --upgrade pip==9.0.1 + # Re-install from cache but validate all hashes (including on pip itself) pip --cache-dir="$PIPCACHE" install --require-hashes \ --requirement "$SCRIPT_PATH/requirements.txt" # Remove temporary bootstrap virtualenv rm -rf ./.venvbootstrap # Exit trusted virtualenv -) &> $WORKSPACE/crio_venv_setup_log.txt; +) &> $ARTIFACTS/crio_venv_setup_log.txt; echo echo "Executing \"$WORKSPACE/.cri-o_venv/bin/ansible-playbook $@\"" echo # Execute command-line arguments under virtualenv -cd "$WORKSPACE" -source ./.cri-o_venv/bin/activate -./.cri-o_venv/bin/ansible-playbook $@ +source ${WORKSPACE}/.cri-o_venv/bin/activate +${WORKSPACE}/.cri-o_venv/bin/ansible-playbook $@ diff --git a/crictl.yaml b/crictl.yaml new file mode 100644 index 00000000..b6142efd --- /dev/null +++ b/crictl.yaml @@ -0,0 +1 @@ +runtime-endpoint: /var/run/crio/crio.sock diff --git a/crio-umount.conf b/crio-umount.conf new file mode 100644 index 00000000..5177e636 --- /dev/null +++ b/crio-umount.conf @@ -0,0 +1,8 @@ +# This contains a list of paths on 
diff --git a/crio-umount.conf b/crio-umount.conf new file mode 100644 index 00000000..5177e636 --- /dev/null +++ b/crio-umount.conf @@ -0,0 +1,8 @@ +# This contains a list of paths on the host which will be unmounted inside +# the container (if they are mounted inside the container). + +# If there is a "/*" at the end, only the mounts underneath that +# mount (submounts) will be unmounted, while the top-level mount itself remains +# in place. +/var/run/containers/* +/var/lib/containers/storage/* diff --git a/docs/crio.8.md b/docs/crio.8.md index 21a2e2dd..8408978b 100644 --- a/docs/crio.8.md +++ b/docs/crio.8.md @@ -5,158 +5,131 @@ crio - OCI Kubernetes Container Runtime daemon # SYNOPSIS -**crio** -[**--apparmor-profile**=[*value*]] -[**--cgroup-manager**=[*value*]] -[**--cni-config-dir**=[*value*]] -[**--cni-plugin-dir**=[*value*]] -[**--config**=[*value*]] -[**--conmon**=[*value*]] -[**--cpu-profile**=[*value*]] -[**--debug**] -[**--default-transport**=[*value*]] -[**--help**|**-h**] -[**--insecure-registry**=[*value*]] -[**--listen**=[*value*]] -[**--log**=[*value*]] -[**--log-format value**] -[**--pause-command**=[*value*]] -[**--pause-image**=[*value*]] -[**--registry**=[*value*]] -[**--root**=[*value*]] -[**--runroot**=[*value*]] -[**--runtime**=[*value*]] -[**--seccomp-profile**=[*value*]] -[**--selinux**] -[**--signature-policy**=[*value*]] -[**--storage-driver**=[*value*]] -[**--storage-opt**=[*value*]] -[**--version**|**-v**] - +crio +``` +[--apparmor-profile=[value]] +[--cgroup-manager=[value]] +[--cni-config-dir=[value]] +[--cni-plugin-dir=[value]] +[--config=[value]] +[--conmon=[value]] +[--cpu-profile=[value]] +[--default-transport=[value]] +[--help|-h] +[--insecure-registry=[value]] +[--listen=[value]] +[--log=[value]] +[--log-format value] +[--log-level value] +[--pause-command=[value]] +[--pause-image=[value]] +[--registry=[value]] +[--root=[value]] +[--runroot=[value]] +[--runtime=[value]] +[--seccomp-profile=[value]] +[--selinux] +[--signature-policy=[value]] +[--storage-driver=[value]] +[--storage-opt=[value]] +[--version|-v] +``` # DESCRIPTION OCI-based implementation of Kubernetes Container Runtime Interface Daemon crio is meant to provide an integration path between OCI conformant runtimes and the kubelet. Specifically, it implements the Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes. The scope of crio is tied to the scope of the CRI. - * Support multiple image formats including the existing Docker image format - * Support for multiple means to download images including trust & image verification - * Container image management (managing image layers, overlay filesystems, etc) - * Container process lifecycle management - * Monitoring and logging required to satisfy the CRI - * Resource isolation as required by the CRI - -**crio [GLOBAL OPTIONS]** - -**crio [GLOBAL OPTIONS] config [OPTIONS]** +1. Support multiple image formats including the existing Docker image format. +2. Support for multiple means to download images including trust & image verification. +3. Container image management (managing image layers, overlay filesystems, etc). +4. Container process lifecycle management. +5. Monitoring and logging required to satisfy the CRI. +6. Resource isolation as required by the CRI.
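To make the rewritten synopsis concrete, here is a minimal sketch of launching the daemon with a few of the global options documented below; the runtime and socket paths repeat the documented defaults, so they are illustrative rather than required:

```bash
# Start CRI-O with explicit (default) paths and more verbose logging.
crio --runtime /usr/bin/runc \
     --listen /var/run/crio/crio.sock \
     --log-format json \
     --log-level debug
```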
+**Usage**: +``` +crio [GLOBAL OPTIONS] +crio [GLOBAL OPTIONS] config [OPTIONS] +``` # GLOBAL OPTIONS +**--apparmor_profile**="": Name of the apparmor profile to be used as the runtime's default (default: "crio-default") -**--apparmor_profile**="" - Name of the apparmor profile to be used as the runtime's default (default: "crio-default") +**--cgroup-manager**="": cgroup manager (cgroupfs or systemd) -**--cgroup-manager**="" - cgroup manager (cgroupfs or systemd) +**--config**="": path to configuration file -**--config**="" - path to configuration file +**--conmon**="": path to the conmon executable (default: "/usr/local/libexec/crio/conmon") -**--conmon**="" - path to the conmon executable (default: "/usr/local/libexec/crio/conmon") +**--cpu-profile**="": set the CPU profile file path -**--cpu-profile**="" -set the CPU profile file path +**--default-transport**: A prefix to prepend to image names that can't be pulled as-is. -**--debug** - Enable debug output for logging +**--help, -h**: Print usage statement -**--default-transport** - A prefix to prepend to image names that can't be pulled as-is. +**--insecure-registry=**: Enable insecure registry communication, i.e., enable un-encrypted and/or untrusted communication. -**--help, -h** - Print usage statement +1. The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +2. Insecure registries accept HTTP, or accept HTTPS with certificates from unknown CAs. +3. Enabling `--insecure-registry` is useful when running a local registry. However, because its use creates security vulnerabilities, **it should ONLY be enabled for testing purposes**. For increased security, users should add their CA to their system's list of trusted CAs instead of using `--insecure-registry`. -**--insecure-registry=** - Enable insecure registry communication, i.e., enable un-encrypted - and/or untrusted communication. +**--image-volumes**="": Image volume handling ('mkdir', 'bind' or 'ignore') (default: "mkdir") - List of insecure registries can contain an element with CIDR notation - to specify a whole subnet. Insecure registries accept HTTP and/or - accept HTTPS with certificates from unknown CAs. +1. mkdir: A directory is created inside the container root filesystem for the volumes. +2. bind: A directory is created inside the container state directory and bind mounted into the container for the volumes. +3. ignore: All volumes are just ignored and no action is taken. - Enabling --insecure-registry is useful when running a local registry. - However, because its use creates security vulnerabilities it should - ONLY be enabled for testing purposes. For increased security, users - should add their CA to their system's list of trusted CAs instead of - using --insecure-registry.
+**--listen**="": Path to CRI-O socket (default: "/var/run/crio/crio.sock") -**--image-volumes**="" - Image volume handling ('mkdir' or 'ignore') (default: "mkdir") +**--log**="": Set the log file path where internal debug information is written -**--listen**="" - Path to crio socket (default: "/var/run/crio.sock") +**--log-format**="": Set the format used by logs ('text' (default), or 'json') (default: "text") -**--log**="" - Set the log file path where internal debug information is written +**--log-level**="": Log crio messages above the specified level: debug, info (default), warn, error, fatal or panic -**--log-format**="" - Set the format used by logs ('text' (default), or 'json') (default: "text") +**--log-size-max**="": Maximum log size in bytes for a container (default: -1 (no limit)). If it is positive, it must be >= 8192 (to match/exceed conmon read buffer). -**--pause-command**="" - Path to the pause executable in the pause image (default: "/pause") +**--pause-command**="": Path to the pause executable in the pause image (default: "/pause") -**--pause-image**="" - Image which contains the pause executable (default: "kubernetes/pause") +**--pause-image**="": Image which contains the pause executable (default: "kubernetes/pause") -**--pids-limit**="" - Maximum number of processes allowed in a container (default: 1024) +**--pids-limit**="": Maximum number of processes allowed in a container (default: 1024) -**--root**="" - CRIO root dir (default: "/var/lib/containers/storage") +**--enable-shared-pid-namespace**="": Enable using a shared PID namespace for containers in a pod (default: false) -**--registry**="" - Registry host which will be prepended to unqualified images, can be specified multiple times +**--root**="": The crio root dir (default: "/var/lib/containers/storage") -**--runroot**="" - CRIO state dir (default: "/var/run/containers/storage") +**--registry**="": Registry host which will be prepended to unqualified images, can be specified multiple times -**--runtime**="" - OCI runtime path (default: "/usr/bin/runc") +**--runroot**="": The crio state dir (default: "/var/run/containers/storage") -**--selinux**=*true*|*false* - Enable selinux support (default: false) +**--runtime**="": OCI runtime path (default: "/usr/bin/runc") -**--seccomp-profile**="" - Path to the seccomp json profile to be used as the runtime's default (default: "/etc/crio/seccomp.json") +**--selinux**=**true**|**false**: Enable selinux support (default: false) -**--signature-policy**="" - Path to the signature policy json file (default: "", to use the system-wide default) +**--seccomp-profile**="": Path to the seccomp json profile to be used as the runtime's default (default: "/etc/crio/seccomp.json") -**--storage-driver** - OCI storage driver (default: "devicemapper") +**--signature-policy**="": Path to the signature policy json file (default: "", to use the system-wide default) -**--storage-opt** - OCI storage driver option (no default) +**--storage-driver**: OCI storage driver (default: "devicemapper") -**--cni-config-dir**="" - CNI configuration files directory (default: "/etc/cni/net.d/") +**--storage-opt**: OCI storage driver option (no default) -**--cni-plugin-dir**="" - CNI plugin binaries directory (default: "/opt/cni/bin/") +**--cni-config-dir**="": CNI configuration files directory (default: "/etc/cni/net.d/") -**--cpu-profile** - Set the CPU profile file path +**--cni-plugin-dir**="": CNI plugin binaries directory (default: "/opt/cni/bin/") -**--version, -v** - Print the version
+**--cpu-profile**: Set the CPU profile file path + +**--version, -v**: Print the version # COMMANDS -CRIO's default command is to start the daemon. However, it currently offers a +CRI-O's default command is to start the daemon. However, it currently offers a single additional subcommand. ## config Outputs a commented version of the configuration file that would've been used -by CRIO. This allows you to save you current configuration setup and then load +by CRI-O. This allows you to save your current configuration setup and then load it later with **--config**. Global options will modify the output. **--default** diff --git a/docs/crio.conf.5.md b/docs/crio.conf.5.md index 9c1896b4..708f26e7 100644 --- a/docs/crio.conf.5.md +++ b/docs/crio.conf.5.md @@ -36,15 +36,39 @@ The `crio` table supports the following options: CRIO state dir (default: "/var/run/containers/storage") **storage_driver**="" - CRIO storage driver (default is "devicemapper") + CRIO storage driver (default is "overlay") + +Note: + **overlay** and **overlay2** are the same driver + **storage_option**=[] CRIO storage driver option list (no default) + Values: + + "STORAGE_DRIVER.imagestore=/PATH", + + Paths to additional container image stores. These are read-only and are usually stored on remote network shares, based on the overlay storage format. + storage_option=[ "overlay.imagestore=/mnt/overlay", ] + + "STORAGE_DRIVER.size=SIZE" + + Maximum size of a container image. Default is 10GB. The size flag sets a quota on the size of container images. + storage_option=[ "overlay.size=1G", ] + +Note: Not all drivers support all options. + +Note: In order to use the **size** option for a quota on *overlay* storage, you must use the *xfs* file system, and the mount point that the *overlay* file system is on must be set up with the *pquota* flag at mount time. If you are setting up / to be used with a quota, you have to modify the Linux boot line in /etc/grub2.conf and add the rootflags=pquota flag. + +Example: + linux16 /vmlinuz-4.12.13-300.fc26.x86_64 root=/dev/mapper/fedora-root ro rd.lvm.lv=fedora/root rd.lvm.lv=fedora/swap rhgb quiet LANG=en_US.UTF-8 rootflags=pquota + + ## CRIO.API TABLE **listen**="" - Path to crio socket (default: "/var/run/crio.sock") + Path to crio socket (default: "/var/run/crio/crio.sock") ## CRIO.RUNTIME TABLE @@ -54,9 +78,18 @@ The `crio` table supports the following options: **conmon_env**=[] Environment variable list for conmon process (default: ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",]) +**log_size_max**="" + Maximum size allowed for the container log file (default: -1) + Negative numbers indicate that no size limit is imposed. + If it is positive, it must be >= 8192 (to match/exceed conmon read buffer). + The file is truncated and re-opened so the limit is never exceeded.
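Taken together, these options map onto the TOML tables of crio.conf. The fragment below is an illustrative sketch assembled from the values documented above, not a complete configuration; the /mnt/overlay image store path is a placeholder:

```toml
# Illustrative crio.conf fragment combining the documented storage and log options.
[crio]
storage_driver = "overlay"
storage_option = [
  "overlay.imagestore=/mnt/overlay", # extra read-only image store (placeholder path)
  "overlay.size=1G",                 # per-image quota; requires xfs mounted with pquota
]

[crio.runtime]
log_size_max = 8192 # smallest valid positive value; -1 (the default) means no limit
```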
+ **pids_limit**="" Maximum number of processes allowed in a container (default: 1024) +**enable_shared_pid_namespace**="" + Enable using a shared PID namespace for containers in a pod (default: false) + **runtime**="" OCI runtime path (default: "/usr/bin/runc") @@ -72,13 +105,23 @@ The `crio` table supports the following options: **apparmor_profile**="" Name of the apparmor profile to be used as the runtime's default (default: "crio-default") +**no_pivot**=*true*|*false* + Instructs the runtime to not use pivot_root, but instead use MS_MOVE + +**default_mounts**=[] + List of mount points, in the form host:container, to be mounted in every container + ## CRIO.IMAGE TABLE **default_transport** A prefix to prepend to image names that can't be pulled as-is (default: "docker://") **image_volumes**="" - Image volume handling ('mkdir' or 'ignore') (default: "mkdir") + Image volume handling ('mkdir', 'bind' or 'ignore') (default: "mkdir") + mkdir: A directory is created inside the container root filesystem for the volumes. + bind: A directory is created inside container state directory and bind mounted into + the container for the volumes. + ignore: All volumes are just ignored and no action is taken. **insecure_registries**="" Enable insecure registry communication, i.e., enable un-encrypted diff --git a/docs/kpod-cp.1.md b/docs/kpod-cp.1.md deleted file mode 100644 index 3f3e1ca1..00000000 --- a/docs/kpod-cp.1.md +++ /dev/null @@ -1,46 +0,0 @@ -% kpod(1) kpod-cp - Copy content between container's file system and the host -% Dan Walsh -# kpod-cp "1" "August 2017" "kpod" - -## NAME -kpod-cp - Copy files/folders between a container and the local filesystem. - -## Description -We chose not to implement this feature in kpod even though the upstream docker -project has it. We have a much stronger capability. Using standard kpod-mount -and kpod-umount, we can take advantage of the entire linux tool chain, rather -then just cp. - -If a user wants to copy contents out of a container or into a container, they -can execute a few simple commands. - -You can copy from the container's file system to the local machine or the -reverse, from the local filesystem to the container. - -If you want to copy the /etc/foobar directory out of a container and onto /tmp -on the host, you could execute the following commands: - - mnt=$(kpod mount CONTAINERID) - cp -R ${mnt}/etc/foobar /tmp - kpod umount CONTAINERID - -If you want to untar a tar ball into a container, you can execute these commands: - - mnt=$(kpod mount CONTAINERID) - tar xf content.tgz -C ${mnt} - kpod umount CONTAINERID - -One last example, if you want to install a package into a container that -does not have dnf installed, you could execute something like: - - mnt=$(kpod mount CONTAINERID) - dnf install --installroot=${mnt} httpd - chroot ${mnt} rm -rf /var/log/dnf /var/cache/dnf - kpod umount CONTAINERID - -This shows that using `kpod mount` and `kpod umount` you can use all of the -standard linux tools for moving files into and out of containers, not just -the cp command. 
- -## SEE ALSO -kpod(1), kpod-mount(1), kpod-umount(1) diff --git a/docs/kpod-diff.1.md b/docs/kpod-diff.1.md deleted file mode 100644 index 1916780c..00000000 --- a/docs/kpod-diff.1.md +++ /dev/null @@ -1,45 +0,0 @@ -% kpod(1) kpod-diff - Inspect changes on a container or image's filesystem -% Dan Walsh -# kpod-diff "1" "August 2017" "kpod" - -## NAME -kpod diff - Inspect changes on a container or image's filesystem - -## SYNOPSIS -**kpod** **diff** [*options* [...]] NAME - -## DESCRIPTION -Displays changes on a container or image's filesystem. The container or image will be compared to its parent layer - -## OPTIONS - -**--format** - -Alter the output into a different format. The only valid format for diff is `json`. - - -## EXAMPLE - -kpod diff redis:alpine -C /usr -C /usr/local -C /usr/local/bin -A /usr/local/bin/docker-entrypoint.sh - -kpod diff --format json redis:alpine -{ - "changed": [ - "/usr", - "/usr/local", - "/usr/local/bin" - ], - "added": [ - "/usr/local/bin/docker-entrypoint.sh" - ] -} - -## SEE ALSO -kpod(1) - -## HISTORY -August 2017, Originally compiled by Ryan Cole diff --git a/docs/kpod-export.1.md b/docs/kpod-export.1.md deleted file mode 100644 index 260cac6b..00000000 --- a/docs/kpod-export.1.md +++ /dev/null @@ -1,48 +0,0 @@ -% kpod(1) kpod-export - Simple tool to export a container's filesystem as a tarball -% Urvashi Mohnani -# kpod-export "1" "July 2017" "kpod" - -## NAME -kpod-export - Export container's filesystem contents as a tar archive - -## SYNOPSIS -**kpod export** -**CONTAINER** -[**--help**|**-h**] - -## DESCRIPTION -**kpod export** exports the filesystem of a container and saves it as a tarball -on the local machine. **kpod export** writes to STDOUT by default and can be -redirected to a file using the **output flag**. - -**kpod [GLOBAL OPTIONS]** - -**kpod export [GLOBAL OPTIONS]** - -**kpod export [OPTIONS] CONTAINER [GLOBAL OPTIONS]** - -## OPTIONS - -**--output, -o** -Write to a file, default is STDOUT - -## GLOBAL OPTIONS - -**--help, -h** - Print usage statement - -## EXAMPLES - -``` -# kpod export -o redis-container.tar 883504668ec465463bc0fe7e63d53154ac3b696ea8d7b233748918664ea90e57 -``` - -``` -# kpod export > redis-container.tar 883504668ec465463bc0fe7e63d53154ac3b696ea8d7b233748918664ea90e57 -``` - -## SEE ALSO -kpod(1), kpod-import(1), crio(8), crio.conf(5) - -## HISTORY -August 2017, Originally compiled by Urvashi Mohnani diff --git a/docs/kpod-history.1.md b/docs/kpod-history.1.md deleted file mode 100644 index 713da6f5..00000000 --- a/docs/kpod-history.1.md +++ /dev/null @@ -1,71 +0,0 @@ -% kpod(1) kpod-history - Simple tool to view the history of an image -% Urvashi Mohnani -% kpod-history "1" "JULY 2017" "kpod" - -## NAME -kpod-history - Shows the history of an image - -## SYNOPSIS -**kpod history [OPTIONS] IMAGE[:TAG|DIGEST]** - -## DESCRIPTION -**kpod history** displays the history of an image by printing out information -about each layer used in the image. The information printed out for each layer -include Created (time and date), Created By, Size, and Comment. The output can -be truncated or not using the **--no-trunc** flag. If the **--human** flag is -set, the time of creation and size are printed out in a human readable format. -The **--quiet** flag displays the ID of the image only when set and the **--format** -flag is used to print the information using the Go template provided by the user. 
- -Valid placeholders for the Go template are listed below: - -| **Placeholder** | **Description** | -| --------------- | ----------------------------------------------------------------------------- | -| .ID | Image ID | -| .Created | if **--human**, time elapsed since creation, otherwise time stamp of creation | -| .CreatedBy | Command used to create the layer | -| .Size | Size of layer on disk | -| .Comment | Comment for the layer | - -**kpod [GLOBAL OPTIONS]** - -**kpod [GLOBAL OPTIONS] history [OPTIONS]** - -## GLOBAL OPTIONS - -**--help, -h** - Print usage statement - -## OPTIONS - -**--human, -H** - Display sizes and dates in human readable format - -**--no-trunc** - Do not truncate the output - -**--quiet, -q** - Print the numeric IDs only - -**--format** - Alter the output for a format like 'json' or a Go template. - - -## COMMANDS - -**kpod history debian** - -**kpod history --no-trunc=true --human=false debian** - -**kpod history --format "{{.ID}} {{.Created}}" debian** - -**kpod history --format json debian** - -## history -Show the history of an image - -## SEE ALSO -kpod(1), crio(8), crio.conf(5) - -## HISTORY -July 2017, Originally compiled by Urvashi Mohnani diff --git a/docs/kpod-images.1.md b/docs/kpod-images.1.md deleted file mode 100644 index 96408090..00000000 --- a/docs/kpod-images.1.md +++ /dev/null @@ -1,60 +0,0 @@ -% kpod(1) kpod-images - List images in local storage -% Dan Walsh -# kpod-images "1" "March 2017" "kpod" - -## NAME -kpod images - List images in local storage - -## SYNOPSIS -**kpod** **images** [*options* [...]] - -## DESCRIPTION -Displays locally stored images, their names, and their IDs. - -## OPTIONS - -**--digests** - -Show image digests - -**--filter, -f=[]** - -Filter output based on conditions provided (default []) - -**--format** - -Change the default output format. This can be of a supported type like 'json' -or a Go template. - -**--noheading, -n** - -Omit the table headings from the listing of images. - -**--no-trunc, --notruncate** - -Do not truncate output. - -**--quiet, -q** - -Lists only the image IDs. - - -## EXAMPLE - -kpod images - -kpod images --quiet - -kpod images -q --noheading --notruncate - -kpod images --format json - -kpod images --format "{{.ID}}" - -kpod images --filter dangling=true - -## SEE ALSO -kpod(1) - -## HISTORY -March 2017, Originally compiled by Dan Walsh diff --git a/docs/kpod-info.1.md b/docs/kpod-info.1.md deleted file mode 100644 index 99deae9b..00000000 --- a/docs/kpod-info.1.md +++ /dev/null @@ -1,36 +0,0 @@ -% kpod(1) kpod-version - Simple tool to view version information -% Vincent Batts -% kpod-version "1" "JULY 2017" "kpod" - -## NAME -kpod-info - Display system information - - -## SYNOPSIS -**kpod** **info** [*options* [...]] - - -## DESCRIPTION - -Information display here pertain to the host, current storage stats, and build of kpod. Useful for the user and when reporting issues. - - -## OPTIONS - -**--debug, -D** - -Show additional information - -**--format** - -Change output format to "json" or a Go template. 
- - -## EXAMPLE - -`kpod info` - -`kpod info --debug --format json| jq .host.kernel` - -## SEE ALSO -crio(8), crio.conf(5) diff --git a/docs/kpod-inspect.1.md b/docs/kpod-inspect.1.md deleted file mode 100644 index 21d41a15..00000000 --- a/docs/kpod-inspect.1.md +++ /dev/null @@ -1,171 +0,0 @@ -% kpod(1) kpod-inspect - Display a container or image's configuration -% Dan Walsh -# kpod-inspect "1" "July 2017" "kpod" - -## NAME -kpod inspect - Display a container or image's configuration - -## SYNOPSIS -**kpod** **inspect** [*options* [...]] name - -## DESCRIPTION -This displays the low-level information on containers and images identified by name or ID. By default, this will render all results in a JSON array. If the container and image have the same name, this will return container JSON for unspecified type. If a format is specified, the given template will be executed for each result. - -## OPTIONS - -**--type, t="TYPE"** - -Return data on items of the specified type. Type can be 'container', 'image' or 'all' (default: all) - -**--format, -f="FORMAT"** - -Format the output using the given Go template - -**--size** - -Display the total file size if the type is a container - - -## EXAMPLE - -kpod inspect redis:alpine - -{ - "ArgsEscaped": true, - "AttachStderr": false, - "AttachStdin": false, - "AttachStdout": false, - "Cmd": [ - "/bin/sh", - "-c", - "#(nop) ", - "CMD [\"redis-server\"]" - ], - "Domainname": "", - "Entrypoint": [ - "entrypoint.sh" - ], - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "REDIS_VERSION=3.2.9", - "REDIS_DOWNLOAD_URL=http://download.redis.io/releases/redis-3.2.9.tar.gz", - "REDIS_DOWNLOAD_SHA=6eaacfa983b287e440d0839ead20c2231749d5d6b78bbe0e0ffa3a890c59ff26" - ], - "ExposedPorts": { - "6379/tcp": {} - }, - "Hostname": "e1ede117fb1e", - "Image": "sha256:75e877aa15b534396de82d385386cc4dda7819d5cbb018b9f97b77aeb8f4b55a", - "Labels": {}, - "OnBuild": [], - "OpenStdin": false, - "StdinOnce": false, - "Tty": false, - "User": "", - "Volumes": { - "/data": {} - }, - "WorkingDir": "/data" -} -{ - "ID": "b3f2436bdb978c1d33b1387afb5d7ba7e3243ed2ce908db431ac0069da86cb45", - "Names": [ - "docker.io/library/redis:alpine" - ], - "Digests": [ - "sha256:88286f41530e93dffd4b964e1db22ce4939fffa4a4c665dab8591fbab03d4926", - "sha256:07b1ac6c7a5068201d8b63a09bb15358ec1616b813ef3942eb8cc12ae191227f", - "sha256:91e2e140ea27b3e89f359cd9fab4ec45647dda2a8e5fb0c78633217d9dca87b5", - "sha256:08957ceaa2b3be874cde8d7fa15c274300f47185acd62bca812a2ffb6228482d", - "sha256:acd3d12a6a79f772961a771f678c1a39e1f370e7baeb9e606ad8f1b92572f4ab", - "sha256:4ad88df090801e8faa8cf0be1f403b77613d13e11dad73f561461d482f79256c", - "sha256:159ac12c79e1a8d85dfe61afff8c64b96881719139730012a9697f432d6b739a" - ], - "Parent": "", - "Comment": "", - "Created": "2017-06-28T22:14:36.35280993Z", - "Container": "ba8d6c6b0d7fdd201fce404236136b44f3bfdda883466531a3d1a1f87906770b", - "ContainerConfig": { - "Hostname": "e1ede117fb1e", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "REDIS_VERSION=3.2.9", - "REDIS_DOWNLOAD_URL=http://download.redis.io/releases/redis-3.2.9.tar.gz", - "REDIS_DOWNLOAD_SHA=6eaacfa983b287e440d0839ead20c2231749d5d6b78bbe0e0ffa3a890c59ff26" - ], - "Cmd": [ - "/bin/sh", - "-c", - "#(nop) ", - "CMD [\"redis-server\"]" - ], - "ArgsEscaped": true, - "Image": 
"sha256:75e877aa15b534396de82d385386cc4dda7819d5cbb018b9f97b77aeb8f4b55a", - "Volumes": { - "/data": {} - }, - "WorkingDir": "/data", - "Entrypoint": [ - "entrypoint.sh" - ], - "Labels": {}, - "OnBuild": [] - }, - "Author": "", - "Config": { - "ExposedPorts": { - "6379/tcp": {} - }, - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "REDIS_VERSION=3.2.9", - "REDIS_DOWNLOAD_URL=http://download.redis.io/releases/redis-3.2.9.tar.gz", - "REDIS_DOWNLOAD_SHA=6eaacfa983b287e440d0839ead20c2231749d5d6b78bbe0e0ffa3a890c59ff26" - ], - "Entrypoint": [ - "entrypoint.sh" - ], - "Cmd": [ - "redis-server" - ], - "Volumes": { - "/data": {} - }, - "WorkingDir": "/data" - }, - "Architecture": "amd64", - "OS": "linux", - "Size": 3965955, - "VirtualSize": 19808086, - "GraphDriver": { - "Name": "overlay2", - "Data": { - "MergedDir": "/var/lib/containers/storage/overlay2/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/merged", - "UpperDir": "/var/lib/containers/storage/overlay2/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/diff", - "WorkDir": "/var/lib/containers/storage/overlay2/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/work" - } - }, - "RootFS": { - "type": "layers", - "diff_ids": [ - "sha256:5bef08742407efd622d243692b79ba0055383bbce12900324f75e56f589aedb0", - "sha256:c92a8fc997217611d0bfc9ff14d7ec00350ca564aef0ecbf726624561d7872d7", - "sha256:d4c406dea37a107b0cccb845611266a146725598be3e82ba31c55c08d1583b5a", - "sha256:8b4fa064e2b6c03a6c37089b0203f167375a8b49259c0ad7cb47c8c1e58b3fa0", - "sha256:c393e3d0b00ddf6b4166f1e2ad68245e08e9e3be0a0567a36d0a43854f03bfd6", - "sha256:38047b4117cb8bb3bba82991daf9a4e14ba01f9f66c1434d4895a7e96f67d8ba" - ] - } -} - - -## SEE ALSO -kpod(1) diff --git a/docs/kpod-load.1.md b/docs/kpod-load.1.md deleted file mode 100644 index 43201c8f..00000000 --- a/docs/kpod-load.1.md +++ /dev/null @@ -1,69 +0,0 @@ -% kpod(1) kpod-load - Simple tool to load an image from an archive to containers-storage -% Urvashi Mohnani -# kpod-load "1" "July 2017" "kpod" - -## NAME -kpod-load - Load an image from docker archive - -## SYNOPSIS -**kpod load** -**NAME[:TAG|@DIGEST]** -[**--help**|**-h**] - -## DESCRIPTION -**kpod load** copies an image from **docker-archive** stored on the local machine. -**kpod load** reads from stdin by default or a file if the **input** flag is set. -The **quiet** flag suppresses the output when set. 
- -**kpod [GLOBAL OPTIONS]** - -**kpod load [GLOBAL OPTIONS]** - -**kpod load [OPTIONS] NAME[:TAG|@DIGEST] [GLOBAL OPTIONS]** - -## OPTIONS - -**--input, -i** -Read from archive file, default is STDIN - -**--quiet, -q** -Suppress the output - -## GLOBAL OPTIONS - -**--help, -h** - Print usage statement - -## EXAMPLES - -``` -# kpod load --quiet -i fedora.tar -``` - -``` -# kpod load < fedora.tar -Getting image source signatures -Copying blob sha256:5bef08742407efd622d243692b79ba0055383bbce12900324f75e56f589aedb0 - 0 B / 4.03 MB [---------------------------------------------------------------] -Copying config sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d624560 - 0 B / 1.48 KB [---------------------------------------------------------------] -Writing manifest to image destination -Storing signatures -``` - -``` -# cat fedora.tar | kpod load -Getting image source signatures -Copying blob sha256:5bef08742407efd622d243692b79ba0055383bbce12900324f75e56f589aedb0 - 0 B / 4.03 MB [---------------------------------------------------------------] -Copying config sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d624560 - 0 B / 1.48 KB [---------------------------------------------------------------] -Writing manifest to image destination -Storing signatures -``` - -## SEE ALSO -kpod(1), kpod-save(1), crio(8), crio.conf(5) - -## HISTORY -July 2017, Originally compiled by Urvashi Mohnani diff --git a/docs/kpod-logs.1.md b/docs/kpod-logs.1.md deleted file mode 100644 index 25d108ed..00000000 --- a/docs/kpod-logs.1.md +++ /dev/null @@ -1,61 +0,0 @@ -% kpod(1) kpod-logs - Fetch the logs of a container -% Ryan Cole -# kpod-logs "1" "March 2017" "kpod" - -## NAME -kpod logs - Fetch the logs of a container - -## SYNOPSIS -**kpod** **logs** [*options* [...]] container - -## DESCRIPTION -The kpod logs command batch-retrieves whatever logs are present for a container at the time of execution. This does not guarantee execution order when combined with kpod run (i.e. your run may not have generated any logs at the time you execute kpod logs - -## OPTIONS - -**--follow, -f** - -Follow log output. Default is false - -**--since=TIMESTAMP** - -Show logs since TIMESTAMP - -**--tail=LINES** - -Ouput the specified number of LINES at the end of the logs. LINES must be a positive integer. Defaults to 0, which prints all lines - -## EXAMPLE - -kpod logs b3f2436bdb978c1d33b1387afb5d7ba7e3243ed2ce908db431ac0069da86cb45 - -2017/08/07 10:16:21 Seeked /var/log/crio/pods/eb296bd56fab164d4d3cc46e5776b54414af3bf543d138746b25832c816b933b/c49f49788da14f776b7aa93fb97a2a71f9912f4e5a3e30397fca7dfe0ee0367b.log - &{Offset:0 Whence:0} -1:C 07 Aug 14:10:09.055 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo -1:C 07 Aug 14:10:09.055 # Redis version=4.0.1, bits=64, commit=00000000, modified=0, pid=1, just started -1:C 07 Aug 14:10:09.055 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf -1:M 07 Aug 14:10:09.055 # You requested maxclients of 10000 requiring at least 10032 max file descriptors. -1:M 07 Aug 14:10:09.055 # Server can't set maximum open files to 10032 because of OS error: Operation not permitted. -1:M 07 Aug 14:10:09.055 # Current maximum open files is 4096. maxclients has been reduced to 4064 to compensate for low ulimit. If you need higher maxclients increase 'ulimit -n'. -1:M 07 Aug 14:10:09.056 * Running mode=standalone, port=6379. 
-1:M 07 Aug 14:10:09.056 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. -1:M 07 Aug 14:10:09.056 # Server initialized - - -kpod logs --tail 2 b3f2436bdb97 - -1:M 07 Aug 14:10:09.056 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. -1:M 07 Aug 14:10:09.056 # Server initialized - -kpod logs 224c375f27cd --since 2017-08-07T10:10:09.055837383-04:00 myserver - -1:M 07 Aug 14:10:09.055 # Server can't set maximum open files to 10032 because of OS error: Operation not permitted. -1:M 07 Aug 14:10:09.055 # Current maximum open files is 4096. maxclients has been reduced to 4064 to compensate for low ulimit. If you need higher maxclients increase 'ulimit -n'. -1:M 07 Aug 14:10:09.056 * Running mode=standalone, port=6379. -1:M 07 Aug 14:10:09.056 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. -1:M 07 Aug 14:10:09.056 # Server initialized - -## SEE ALSO -kpod(1) - -## HISTORY -August 2017, Originally compiled by Ryan Cole diff --git a/docs/kpod-mount.1.md b/docs/kpod-mount.1.md deleted file mode 100644 index 599edce6..00000000 --- a/docs/kpod-mount.1.md +++ /dev/null @@ -1,50 +0,0 @@ -% kpod(1) kpod-mount - Mount a working container's root filesystem. -% Dan Walsh -# kpod-mount "1" "July 2017" "kpod" - -## NAME -kpod mount - Mount a working container's root filesystem - -## SYNOPSIS -**kpod** **mount** - -**kpod** **mount** **containerID** - -## DESCRIPTION -Mounts the specified container's root file system in a location which can be -accessed from the host, and returns its location. - -If you execute the command without any arguments, the tool will list all of the -currently mounted containers. - -## RETURN VALUE -The location of the mounted file system. On error an empty string and errno is -returned. - -## OPTIONS - -**--format** - Print the mounted containers in specified format (json) - -**--notruncate** - -Do not truncate IDs in output. - -**--label** - -SELinux label for the mount point - -## EXAMPLE - -kpod mount c831414b10a3 - -/var/lib/containers/storage/overlay2/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged - -kpod mount - -c831414b10a3 /var/lib/containers/storage/overlay2/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged - -a7060253093b /var/lib/containers/storage/overlay2/0ff7d7ca68bed1ace424f9df154d2dd7b5a125c19d887f17653cbcd5b6e30ba1/merged - -## SEE ALSO -kpod(1), kpod-umount(1), mount(8) diff --git a/docs/kpod-ps.1.md b/docs/kpod-ps.1.md deleted file mode 100644 index 027c9bdd..00000000 --- a/docs/kpod-ps.1.md +++ /dev/null @@ -1,118 +0,0 @@ -% kpod(1) kpod-ps - Simple tool to list containers -% Urvashi Mohnani -% kpod-ps "1" "AUGUST 2017" "kpod" - -## NAME -kpod-ps - Prints out information about containers - -## SYNOPSIS -**kpod ps [OPTIONS] CONTAINER** - -## DESCRIPTION -**kpod ps** lists the running containers on the system. Use the **--all** flag to view -all the containers information. 
By default it lists: - - * container id - * the name of the image the container is using - * the COMMAND the container is executing - * the time the container was created - * the status of the container - * port mappings the container is using - * alternative names for the container - -**kpod [GLOBAL OPTIONS]** - -**kpod [GLOBAL OPTIONS] ps [OPTIONS]** - -## GLOBAL OPTIONS - -**--help, -h** - Print usage statement - -## OPTIONS - -**--all, -a** - Show all the containers, default is only running containers - -**--no-trunc** - Display the extended information - -**--quiet, -q** - Print the numeric IDs of the containers only - -**--format** - Pretty-print containers to JSON or using a Go template - -Valid placeholders for the Go template are listed below: - -| **Placeholder** | **Description** | -| --------------- | ------------------------------------------------ | -| .ID | Container ID | -| .Image | Image ID/Name | -| .Command | Quoted command used | -| .CreatedAt | Creation time for container | -| .RunningFor | Time elapsed since container was started | -| .Status | Status of container | -| .Ports | Exposed ports | -| .Size | Size of container | -| .Names | Name of container | -| .Labels | All the labels assigned to the container | -| .Mounts | Volumes mounted in the container | - - -**--size, -s** - Display the total file size - -**--last, -n** - Print the n last created containers (all states) - -**--latest, -l** - show the latest container created (all states) - -**--filter, -f** - Filter output based on conditions given - -Valid filters are listed below: - -| **Filter** | **Description** | -| --------------- | ------------------------------------------------------------------- | -| id | [ID] Container's ID | -| name | [Name] Container's name | -| label | [Key] or [Key=Value] Label assigned to a container | -| exited | [Int] Container's exit code | -| status | [Status] Container's status, e.g *running*, *stopped* | -| ancestor | [ImageName] Image or descendant used to create container | -| before | [ID] or [Name] Containers created before this container | -| since | [ID] or [Name] Containers created since this container | -| volume | [VolumeName] or [MountpointDestination] Volume mounted in container | - -## COMMANDS - -``` -sudo kpod ps -a -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -02f65160e14ca redis:alpine "redis-server" 19 hours ago Exited (-1) 19 hours ago 6379/tcp k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0 -69ed779d8ef9f redis:alpine "redis-server" 25 hours ago Created 6379/tcp k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1 -``` - -``` -sudo kpod ps -a -s -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE -02f65160e14ca redis:alpine "redis-server" 20 hours ago Exited (-1) 20 hours ago 6379/tcp k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0 27.49 MB -69ed779d8ef9f redis:alpine "redis-server" 25 hours ago Created 6379/tcp k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1 27.49 MB -``` - -``` -sudo kpod ps -a --format "{{.ID}} {{.Image}} {{.Labels}} {{.Mounts}}" -02f65160e14ca redis:alpine tier=backend proc,tmpfs,devpts,shm,mqueue,sysfs,cgroup,/var/run/,/var/run/ -69ed779d8ef9f redis:alpine batch=no,type=small proc,tmpfs,devpts,shm,mqueue,sysfs,cgroup,/var/run/,/var/run/ -``` - -## ps -Print a list of containers - -## SEE ALSO -kpod(1), crio(8), crio.conf(5) - -## HISTORY -August 2017, Originally compiled by Urvashi Mohnani diff --git a/docs/kpod-pull.1.md b/docs/kpod-pull.1.md 
deleted file mode 100644 index 1787e209..00000000 --- a/docs/kpod-pull.1.md +++ /dev/null @@ -1,64 +0,0 @@ -% kpod(1) kpod-pull - Simple tool to pull an image from a registry -% Urvashi Mohnani -# kpod-pull "1" "July 2017" "kpod" - -## NAME -kpod-pull - Pull an image from a registry - -## SYNOPSIS -**kpod pull** -**NAME[:TAG|@DIGEST]** -[**--help**|**-h**] - -## DESCRIPTION -Copies an image from a registry onto the local machine. **kpod pull** pulls an -image from Docker Hub if a registry is not specified in the command line argument. -If an image tag is not specified, **kpod pull** defaults to the image with the -**latest** tag (if it exists) and pulls it. **kpod pull** can also pull an image -using its digest **kpod pull [image]@[digest]**. **kpod pull** can be used to pull -images from archives and local storage using different transports. - -## imageID -Image stored in local container/storage - -## DESTINATION - - The DESTINATION is a location to store container images - The Image "DESTINATION" uses a "transport":"details" format. - - Multiple transports are supported: - - **dir:**_path_ - An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection. - - **docker://**_docker-reference_ - An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$HOME/.docker/config.json`, which is set e.g. using `(docker login)`. - - **docker-archive:**_path_[**:**_docker-reference_] - An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest. - - **docker-daemon:**_docker-reference_ - An image _docker-reference_ stored in the docker daemon internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID). - - **oci:**_path_**:**_tag_ - An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_. - - **ostree:**_image_[**@**_/absolute/repo/path_] - An image in local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_. - -**kpod [GLOBAL OPTIONS]** - -**kpod pull [GLOBAL OPTIONS]** - -**kpod pull NAME[:TAG|@DIGEST] [GLOBAL OPTIONS]** - -## GLOBAL OPTIONS - -**--help, -h** - Print usage statement - -## SEE ALSO -kpod(1), crio(8), crio.conf(5) - -## HISTORY -July 2017, Originally compiled by Urvashi Mohnani diff --git a/docs/kpod-push.1.md b/docs/kpod-push.1.md deleted file mode 100644 index 6970a172..00000000 --- a/docs/kpod-push.1.md +++ /dev/null @@ -1,98 +0,0 @@ -% kpod(1) kpod-push - Push an image from local storage to elsewhere -% Dan Walsh -# kpod-push "1" "June 2017" "kpod" - -## NAME -kpod push - Push an image from local storage to elsewhere - -## SYNOPSIS -**kpod** **push** [*options* [...]] **imageID** [**destination**] - -## DESCRIPTION -Pushes an image from local storage to a specified destination, decompressing -and recompressing layers as needed. - -## imageID -Image stored in local container/storage - -## DESTINATION - - The DESTINATION is a location to store container images - The Image "DESTINATION" uses a "transport":"details" format. - - Multiple transports are supported: - - **dir:**_path_ - An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. 
This is a non-standardized format, primarily useful for debugging or noninvasive container inspection. - - **docker://**_docker-reference_ - An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$HOME/.docker/config.json`, which is set e.g. using `(docker login)`. - - **docker-archive:**_path_[**:**_docker-reference_] - An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest. - - **docker-daemon:**_docker-reference_ - An image _docker-reference_ stored in the docker daemon internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID). - - **oci:**_path_**:**_tag_ - An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_. - - **ostree:**_image_[**@**_/absolute/repo/path_] - An image in local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_. - -## OPTIONS - -**--creds="CREDENTIALS"** - -Credentials (USERNAME:PASSWORD) to use for authenticating to a registry - -**cert-dir="PATHNAME"** - -Pathname of a directory containing TLS certificates and keys - -**--disable-compression, -D** - -Don't compress copies of filesystem layers which will be pushed - -**--quiet, -q** - -When writing the output image, suppress progress output - -**--remove-signatures** - -Discard any pre-existing signatures in the image - -**--signature-policy="PATHNAME"** - -Pathname of a signature policy file to use. It is not recommended that this -option be used, as the default behavior of using the system-wide default policy -(frequently */etc/containers/policy.json*) is most often preferred - -**--sign-by="KEY"** - -Add a signature at the destination using the specified key - -**--tls-verify** - -Require HTTPS and verify certificates when contacting registries (default: true) - -## EXAMPLE - -This example extracts the imageID image to a local directory in docker format. - - `# kpod push imageID dir:/path/to/image` - -This example extracts the imageID image to a local directory in oci format. - - `# kpod push imageID oci:/path/to/layout` - -This example extracts the imageID image to a container registry named registry.example.com - - `# kpod push imageID docker://registry.example.com/repository:tag` - -This example extracts the imageID image and puts into the local docker container store - - `# kpod push imageID docker-daemon:image:tag` - -## SEE ALSO -kpod(1) diff --git a/docs/kpod-rename.1.md b/docs/kpod-rename.1.md deleted file mode 100644 index e59dbd99..00000000 --- a/docs/kpod-rename.1.md +++ /dev/null @@ -1,24 +0,0 @@ -% kpod(1) kpod-rename - Rename a container -% Ryan Cole -# kpod-images "1" "March 2017" "kpod" - -## NAME -kpod rename - Rename a container - -## SYNOPSIS -**kpod** **rename** CONTAINER NEW-NAME - -## DESCRIPTION -Rename a container. 
Container may be created, running, paused, or stopped - -## EXAMPLE - -kpod rename redis-container webserver - -kpod rename a236b9a4 mycontainer - -## SEE ALSO -kpod(1) - -## HISTORY -March 2017, Originally compiled by Ryan Cole diff --git a/docs/kpod-rm.1.md b/docs/kpod-rm.1.md deleted file mode 100644 index 4e2c490a..00000000 --- a/docs/kpod-rm.1.md +++ /dev/null @@ -1,31 +0,0 @@ -% kpod(1) kpod-rm - Remove one or more containers -% Ryan Cole -# kpod-rm "1" "August 2017" "kpod" - -## NAME -kpod rm - Remove one or more containers - -## SYNOPSIS -**kpod** **rm** [*options* [...]] container - -## DESCRIPTION -Kpod rm will remove one or more containers from the host. The container name or ID can be used. This does not remove images. Running containers will not be removed without the -f option - -## OPTIONS - -**--force, f** - -Force the removal of a running container - - -## EXAMPLE - -kpod rm mywebserver - -kpod rm -f 860a4b23 - -## SEE ALSO -kpod(1), kpod-rmi(1) - -## HISTORY -August 2017, Originally compiled by Ryan Cole diff --git a/docs/kpod-rmi.1.md b/docs/kpod-rmi.1.md deleted file mode 100644 index 1566f961..00000000 --- a/docs/kpod-rmi.1.md +++ /dev/null @@ -1,32 +0,0 @@ -% kpod(1) kpod-rmi - Removes one or more images -% Dan Walsh -# kpod-rmi "1" "March 2017" "kpod" - -## NAME -kpod rmi - Removes one or more images - -## SYNOPSIS -**kpod** **rmi** **imageID [...]** - -## DESCRIPTION -Removes one or more locally stored images. - -## OPTIONS - -**--force, -f** - -Executing this command will stop all containers that are using the image and remove them from the system - -## EXAMPLE - -kpod rmi imageID - -kpod rmi --force imageID - -kpod rmi imageID1 imageID2 imageID3 - -## SEE ALSO -kpod(1) - -## HISTORY -March 2017, Originally compiled by Dan Walsh diff --git a/docs/kpod-save.1.md b/docs/kpod-save.1.md deleted file mode 100644 index 454a18f0..00000000 --- a/docs/kpod-save.1.md +++ /dev/null @@ -1,51 +0,0 @@ -% kpod(1) kpod-save - Simple tool to save an image to an archive -% Urvashi Mohnani -# kpod-save "1" "July 2017" "kpod" - -## NAME -kpod-save - Save an image to docker-archive or oci - -## SYNOPSIS -**kpod save** -**NAME[:TAG]** -[**--help**|**-h**] - -## DESCRIPTION -**kpod save** saves an image to either **docker-archive** on the loacl machine. -**kpod save** writes to STDOUT by default and can be redirected to a file -using the **output** flag. The **quiet** flag suppresses the output when set. 
- -**kpod [GLOBAL OPTIONS]** - -**kpod save [GLOBAL OPTIONS]** - -**kpod save [OPTIONS] NAME[:TAG] [GLOBAL OPTIONS]** - -## OPTIONS - -**--output, -o** -Write to a file, default is STDOUT - -**--quiet, -q** -Suppress the output - -## GLOBAL OPTIONS - -**--help, -h** - Print usage statement - -## EXAMPLES - -``` -# kpod save --quiet -o alpine.tar alpine:2.6 -``` - -``` -# kpod save > alpine-all.tar alpine -``` - -## SEE ALSO -kpod(1), kpod-load(1), crio(8), crio.conf(5) - -## HISTORY -July 2017, Originally compiled by Urvashi Mohnani diff --git a/docs/kpod-stats.1.md b/docs/kpod-stats.1.md deleted file mode 100644 index 1c1c0b35..00000000 --- a/docs/kpod-stats.1.md +++ /dev/null @@ -1,37 +0,0 @@ -% kpod(1) kpod-stats - Display a live stream of 1 or more containers' resource usage statistics -% Ryan Cole -# kpod-stats "1" "July 2017" "kpod" - -## NAME -kpod-stats - Display a live stream of 1 or more containers' resource usage statistics - -## SYNOPSIS -**kpod** **stats** [*options* [...]] [container] - -## DESCRIPTION -Display a live stream of one or more containers' resource usage statistics - -## OPTIONS - -**--all, -a** - -Show all containers. Only running containers are shown by default - -**--no-stream** - -Disable streaming stats and only pull the first result, default setting is false - -**--format="TEMPLATE"** - -Pretty-print images using a Go template - - -## EXAMPLE - -TODO - -## SEE ALSO -kpod(1) - -## HISTORY -July 2017, Originally compiled by Ryan Cole diff --git a/docs/kpod-tag.1.md b/docs/kpod-tag.1.md deleted file mode 100644 index b92b2eb1..00000000 --- a/docs/kpod-tag.1.md +++ /dev/null @@ -1,34 +0,0 @@ -% kpod(1) kpod-tag - Add tags to an image -% Ryan Cole -# kpod-tag "1" "July 2017" "kpod" - -## NAME -kpod tag - Add an additional name to a local image - -## SYNOPSIS -**kpod tag** -[**--help**|**-h**] - -## DESCRIPTION -Assigns a new alias to an image in a registry. An alias refers to the entire image name, including the optional **TAG** after the ':' - -**kpod [GLOBAL OPTIONS]** - -**kpod [GLOBAL OPTIONS] tag [OPTIONS]** - -## GLOBAL OPTIONS - -**--help, -h** - Print usage statement - -## EXAMPLES - - kpod tag 0e3bbc2 fedora:latest - - kpod tag httpd myregistryhost:5000/fedora/httpd:v2 - -## SEE ALSO -kpod(1), crio(8), crio.conf(5) - -## HISTORY -July 2017, Originally compiled by Ryan Cole diff --git a/docs/kpod-umount.1.md b/docs/kpod-umount.1.md deleted file mode 100644 index 2ee03356..00000000 --- a/docs/kpod-umount.1.md +++ /dev/null @@ -1,19 +0,0 @@ -% kpod(1) kpod-umount - Unmount a working container's root filesystem. -% Dan Walsh -# kpod-umount "1" "July 2017" "kpod" - -## NAME -kpod umount - Unmount a working container's root file system - -## SYNOPSIS -**kpod** **umount** **containerID** - -## DESCRIPTION -Unmounts the specified container's root file system. - -## EXAMPLE - -kpod umount containerID - -## SEE ALSO -kpod(1), kpod-mount(1) diff --git a/docs/kpod-version.1.md b/docs/kpod-version.1.md deleted file mode 100644 index 60c79f77..00000000 --- a/docs/kpod-version.1.md +++ /dev/null @@ -1,29 +0,0 @@ -% kpod(1) kpod-version - Simple tool to view version information -% Urvashi Mohnani -# kpod-version "1" "July 2017" "kpod" - -## NAME -kpod-version - Display the KPOD Version Information - -## SYNOPSIS -**kpod version** -[**--help**|**-h**] - -## DESCRIPTION -Shows the the following information: Version, Go Version, Git Commit, Build Time, -OS, and Architecture. 
- -**kpod [GLOBAL OPTIONS]** - -**kpod [GLOBAL OPTIONS] version [OPTIONS]** - -## GLOBAL OPTIONS - -**--help, -h** - Print usage statement - -## SEE ALSO -kpod(1), crio(8), crio.conf(5) - -## HISTORY -July 2017, Originally compiled by Urvashi Mohnani diff --git a/docs/kpod.1.md b/docs/kpod.1.md deleted file mode 100644 index 1e2fc412..00000000 --- a/docs/kpod.1.md +++ /dev/null @@ -1,116 +0,0 @@ -% kpod(1) kpod - Simple management tool for pods and images -% Dan Walsh -# kpod "1" "September 2016" "kpod" -## NAME -kpod - Simple management tool for containers and images - -## SYNOPSIS -**kpod** [*options*] COMMAND - -# DESCRIPTION -kpod is a simple client only tool to help with debugging issues when daemons -such as CRI runtime and the kubelet are not responding or failing. A shared API -layer could be created to share code between the daemon and kpod. kpod does not -require any daemon running. kpod utilizes the same underlying components that -crio uses i.e. containers/image, container/storage, oci-runtime-tool/generate, -runc or any other OCI compatible runtime. kpod shares state with crio and so -has the capability to debug pods/images created by crio. - -**kpod [GLOBAL OPTIONS]** - -## GLOBAL OPTIONS - -**--help, -h** - Print usage statement - -**--config value, -c**=**"config.file"** - Path of a config file detailing container server configuration options - -**--debug** - Print debugging information - -**--root**=**value** - Path to the root directory in which data, including images, is stored - -**--runroot**=**value** - Path to the 'run directory' where all state information is stored - -**--runtime**=**value** - Path to the OCI compatible binary used to run containers - -**--storage-driver, -s**=**value** - Select which storage driver is used to manage storage of images and containers (default is overlay) - -**--storage-opt**=**value** - Used to pass an option to the storage driver - -**--version, -v** - Print the version - -## COMMANDS - -### diff -Inspect changes on a container or image's filesystem - -### export -Export container's filesystem contents as a tar archive - -### history -Shows the history of an image - -### images -List images in local storage - -### info -Displays system information - -### inspect -Display a container or image's configuration - -### load -Load an image from docker archive - -### logs -Display the logs of a container - -### mount -Mount a working container's root filesystem - -### ps -Prints out information about containers - -### pull -Pull an image from a registry - -### push -Push an image from local storage to elsewhere - -### rename -Rename a container - -### rm -Remove one or more containers - -### rmi -Removes one or more locally stored images - -### save -Save an image to docker-archive or oci - -### stats -Display a live stream of one or more containers' resource usage statistics - -### tag -Add an additional name to a local image - -### umount -Unmount a working container's root file system - -### version -Display the version information - -## SEE ALSO -crio(8), crio.conf(5) - -## HISTORY -Dec 2016, Originally compiled by Dan Walsh diff --git a/docs/play.png b/docs/play.png deleted file mode 100644 index 9be2868f..00000000 Binary files a/docs/play.png and /dev/null differ diff --git a/hack/btrfs_installed_tag.sh b/hack/btrfs_installed_tag.sh new file mode 100755 index 00000000..357f33b8 --- /dev/null +++ b/hack/btrfs_installed_tag.sh @@ -0,0 +1,7 @@ +#!/bin/bash +cc -E - > /dev/null 2> /dev/null << EOF +#include <btrfs/ioctl.h> +EOF +if test $? -ne 0 ; then + echo exclude_graphdriver_btrfs +fi
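This and the following hack/ scripts each print a Go build tag describing what the host toolchain provides, so the build can include or exclude the matching drivers and features. The Makefile wiring that consumes them is outside this diff, so the following is a hypothetical sketch (the ./cmd/crio package path is likewise illustrative):

```bash
# Hypothetical sketch: collect the tags emitted by the detection scripts and
# pass them to `go build` so compiled features match the host's C libraries.
BUILDTAGS="$(hack/btrfs_installed_tag.sh) $(hack/libdm_installed.sh) \
  $(hack/ostree_tag.sh) $(hack/selinux_tag.sh)"
go build -tags "$BUILDTAGS" -o bin/crio ./cmd/crio
```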
diff --git a/hack/libdm_installed.sh b/hack/libdm_installed.sh new file mode 100755 index 00000000..f48c7e27 --- /dev/null +++ b/hack/libdm_installed.sh @@ -0,0 +1,7 @@ +#!/bin/bash +cc -E - > /dev/null 2> /dev/null << EOF +#include <libdevmapper.h> +EOF +if test $? -ne 0 ; then + echo exclude_graphdriver_devicemapper +fi diff --git a/hack/libdm_tag.sh b/hack/libdm_no_deferred_remove_tag.sh similarity index 100% rename from hack/libdm_tag.sh rename to hack/libdm_no_deferred_remove_tag.sh diff --git a/hack/ostree_tag.sh b/hack/ostree_tag.sh new file mode 100755 index 00000000..89499c5e --- /dev/null +++ b/hack/ostree_tag.sh @@ -0,0 +1,4 @@ +#!/bin/bash +if ! pkg-config ostree-1 2> /dev/null ; then + echo containers_image_ostree_stub +fi diff --git a/hack/selinux_tag.sh b/hack/selinux_tag.sh new file mode 100755 index 00000000..ff80fda0 --- /dev/null +++ b/hack/selinux_tag.sh @@ -0,0 +1,4 @@ +#!/bin/bash +if pkg-config libselinux 2> /dev/null ; then + echo selinux +fi diff --git a/hack/validate/.validate b/hack/validate/.validate new file mode 100644 index 00000000..9f05ff11 --- /dev/null +++ b/hack/validate/.validate @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -e -o pipefail + +if [ -z "$VALIDATE_UPSTREAM" ]; then + # this is kind of an expensive check, so let's not do this twice if we + # are running more than one validate bundle script + + VALIDATE_REPO='https://github.com/kubernetes-incubator/cri-o.git' + VALIDATE_BRANCH='master' + + VALIDATE_HEAD="$(git rev-parse --verify HEAD)" + + git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" + VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)" + + VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" + VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" + + validate_diff() { + if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then + git diff "$VALIDATE_COMMIT_DIFF" "$@" + fi + } + validate_log() { + if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then + git log "$VALIDATE_COMMIT_LOG" "$@" + fi + } +fi diff --git a/hack/verify-gofmt.sh b/hack/verify-gofmt.sh deleted file mode 100755 index 5577d1b9..00000000 --- a/hack/verify-gofmt.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -o errexit -set -o nounset -set -o pipefail - -find_files() { - find . -not \( \ - \( \ - -wholename '*/vendor/*' \ - \) -prune \ - \) -name '*.go' -} - -GOFMT="gofmt -s" -bad_files=$(find_files | xargs $GOFMT -l) -if [[ -n "${bad_files}" ]]; then - echo "!!! '$GOFMT' needs to be run on the following files: " - echo "${bad_files}" - exit 1 -fi diff --git a/hooks.md b/hooks.md index 809dbdc2..cd3d0a40 100644 --- a/hooks.md +++ b/hooks.md @@ -53,6 +53,7 @@ type HookParams struct { Cmds []string `json:"cmds"` Annotations []string `json:"annotations"` HasBindMounts bool `json:"hasbindmounts"` + Arguments []string `json:"arguments"` } ``` @@ -63,6 +64,7 @@ type HookParams struct { | cmds | List of regular expressions to match the command for running the container. If the command matches a regex, the hook will be run | Optional | | annotations | List of regular expressions to match against the Annotations in the container runtime spec, if an Annotation matches the hook will be run|optional | | hasbindmounts | Tells CRI-O to run the hook if the container has bind mounts from the host into the container | Optional | +| arguments | Additional arguments to append to the hook command when executing it.
diff --git a/hack/validate/.validate b/hack/validate/.validate new file mode 100644 index 00000000..9f05ff11 --- /dev/null +++ b/hack/validate/.validate @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -e -o pipefail + +if [ -z "$VALIDATE_UPSTREAM" ]; then + # this is kind of an expensive check, so let's not do this twice if we + # are running more than one validate bundlescript + + VALIDATE_REPO='https://github.com/kubernetes-incubator/cri-o.git' + VALIDATE_BRANCH='master' + + VALIDATE_HEAD="$(git rev-parse --verify HEAD)" + + git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" + VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)" + + VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" + VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" + + validate_diff() { + if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then + git diff "$VALIDATE_COMMIT_DIFF" "$@" + fi + } + validate_log() { + if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then + git log "$VALIDATE_COMMIT_LOG" "$@" + fi + } +fi diff --git a/hack/verify-gofmt.sh b/hack/verify-gofmt.sh deleted file mode 100755 index 5577d1b9..00000000 --- a/hack/verify-gofmt.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -o errexit -set -o nounset -set -o pipefail - -find_files() { - find . -not \( \ - \( \ - -wholename '*/vendor/*' \ - \) -prune \ - \) -name '*.go' -} - -GOFMT="gofmt -s" -bad_files=$(find_files | xargs $GOFMT -l) -if [[ -n "${bad_files}" ]]; then - echo "!!! '$GOFMT' needs to be run on the following files: " - echo "${bad_files}" - exit 1 -fi diff --git a/hooks.md b/hooks.md index 809dbdc2..cd3d0a40 100644 --- a/hooks.md +++ b/hooks.md @@ -53,6 +53,7 @@ type HookParams struct { Cmds []string `json:"cmds"` Annotations []string `json:"annotations"` HasBindMounts bool `json:"hasbindmounts"` + Arguments []string `json:"arguments"` } ``` @@ -63,6 +64,7 @@ type HookParams struct { | cmds | List of regular expressions to match the command for running the container. If the command matches a regex, the hook will be run | Optional | | annotations | List of regular expressions to match against the Annotations in the container runtime spec, if an Annotation matches the hook will be run|optional | | hasbindmounts | Tells CRI-O to run the hook if the container has bind mounts from the host into the container | Optional | +| arguments | Additional arguments to append to the hook command when executing it. For example, --debug | Optional | ### Example @@ -85,6 +87,7 @@ cat /etc/containers/oci/hooks.d/oci-systemd-hook.json "hasbindmounts": true, "hook": "/usr/libexec/oci/hooks.d/oci-umount", - "stages": [ "prestart" ] + "stages": [ "prestart" ], + "arguments": [ "--debug" ] } ``` -In this example the oci-umount will only be run during the prestart phase if the container has volume/bind mounts from the host into the container. +In this example the oci-umount will only be run during the prestart phase if the container has volume/bind mounts from the host into the container; it will also execute oci-umount with the --debug argument.
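The new `arguments` field gives a hook a way to receive extra command-line flags. A minimal sketch of the intended semantics, trimmed to the two fields involved (`buildHookCmd` is a hypothetical helper for illustration, not CRI-O's implementation): the listed strings are simply appended to the hook binary's argv when the hook fires.

```go
package main

import (
	"fmt"
	"os/exec"
)

// HookParams mirrors the hooks.md struct, trimmed to the fields used
// here; Arguments carries the extra flags introduced by this patch.
type HookParams struct {
	Hook      string   `json:"hook"`
	Arguments []string `json:"arguments"`
}

// buildHookCmd shows the semantics of "arguments": they are appended to
// the hook's command line. (Hypothetical helper, illustration only.)
func buildHookCmd(p HookParams) *exec.Cmd {
	return exec.Command(p.Hook, p.Arguments...)
}

func main() {
	p := HookParams{
		Hook:      "/usr/libexec/oci/hooks.d/oci-umount",
		Arguments: []string{"--debug"},
	}
	fmt.Println(buildHookCmd(p).Args) // [/usr/libexec/oci/hooks.d/oci-umount --debug]
}
```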
diff --git a/kpod-images.json b/kpod-images.json deleted file mode 100644 index a87f1d5b..00000000 --- a/kpod-images.json +++ /dev/null @@ -1,18446 +0,0 @@ [all 18,446 deleted lines are an accidentally committed asciinema terminal recording: JSON escape-sequence data capturing a shell session that demonstrates kpod images with the --no-trunc, -q, --digests, --filter, and --format options; the raw capture is omitted here]
"\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[46;31H{\u001b[48;17H}\r\n\r\n\u001b[96m\u001b[47m 93 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  73%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m90\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:26\u001b[47;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.028582, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[45;31H\u001b[1m\u001b[31m\u001b[106m{\u001b[47;17H}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 94 \u001b[m\u001b[93m\u001b[107m }\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  74%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[47;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.031483, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[44;31H{\u001b[46;17H}\u001b[47;27H\u001b[1m\u001b[31m\u001b[106m(\u001b[10C)\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 95 \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  75%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:34\u001b[47;38H\u001b[?12l\u001b[?25h" - ], - [ - 0.161023, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[44;31H\u001b[1m\u001b[31m\u001b[106m{\u001b[46;17H}\u001b[m\u001b[93m\u001b[107m\u001b[47;27H(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[9C)\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  74%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[46;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.499146, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[44;31H{\u001b[46;17H}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  73%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:26\u001b[45;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.027118, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[44;31H\u001b[1m\u001b[31m\u001b[106m{\u001b[46;17H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  72%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m89\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:27\u001b[44;31H\u001b[?12l\u001b[?25h" - ], - [ - 0.033789, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;52H\u001b[1m\u001b[31m\u001b[106m(\u001b[12C)\u001b[m\u001b[93m\u001b[107m\u001b[44;31H{\u001b[46;17H}\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:61\u001b[43;65H\u001b[?12l\u001b[?25h" - ], - [ - 0.026661, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[40;41H\u001b[1m\u001b[31m\u001b[106m{\u001b[42;17H}\u001b[m\u001b[93m\u001b[107m\u001b[43;52H(i\u001b[11C)\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  71%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[42;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.032548, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[40;41H{\u001b[42;17H}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  
70%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:24\u001b[41;28H\u001b[?12l\u001b[?25h" - ], - [ - 0.029278, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[40;41H\u001b[1m\u001b[31m\u001b[106m{\u001b[42;17H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  69%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:37\u001b[40;41H\u001b[?12l\u001b[?25h" - ], - [ - 0.026258, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[42;17H}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  68%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1 \u001b[39;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.034793, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[32;24H\u001b[1m\u001b[31m\u001b[106m{\u001b[38;17H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  67%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[38;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.032971, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[32;24H{\u001b[37;31H\u001b[1m\u001b[31m\u001b[106m(\u001b[20C)\u001b[m\u001b[93m\u001b[107m\u001b[38;17H}\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:48\u001b[37;52H\u001b[?12l\u001b[?25h" - ], - [ - 0.029998, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[34;36H\u001b[1m\u001b[31m\u001b[106m{\u001b[36;21H}\u001b[m\u001b[93m\u001b[107m\u001b[37;31H(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[19C)\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  66%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[36;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.030022, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[34;36H{\u001b[36;21H}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  65%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:30\u001b[35;34H\u001b[?12l\u001b[?25h" - ], - [ - 0.031828, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[34;36H\u001b[1m\u001b[31m\u001b[106m{\u001b[36;21H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  64%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m79\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:32\u001b[34;36H\u001b[?12l\u001b[?25h" - ], - [ - 0.027492, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[33;58H\u001b[1m\u001b[31m\u001b[106m(\u001b[16C)\u001b[m\u001b[93m\u001b[107m\u001b[34;36H{\u001b[36;21H}\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  63%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:71\u001b[33;75H\u001b[?12l\u001b[?25h" - ], - [ - 0.032435, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[32;24H\u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\u001b[33;58H(s\u001b[15C)\u001b[38;17H\u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:20\u001b[32;24H\u001b[?12l\u001b[?25h" - ], - [ - 
0.031073, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[31;46H\u001b[1m\u001b[31m\u001b[106m[\u001b[2C]\u001b[m\u001b[93m\u001b[107m\u001b[32;24H{\u001b[38;17H}\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  62%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:45\u001b[31;49H\u001b[?12l\u001b[?25h" - ], - [ - 0.025565, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\b\b[:\u001b[1C]\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  61%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:88\u001b[30;92H\u001b[?12l\u001b[?25h" - ], - [ - 0.034775, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[27;55H\u001b[1m\u001b[31m\u001b[106m{\u001b[29;21H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  60%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[29;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.031444, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[27;55H{\u001b[29;21H}\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  59%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[28;101H\u001b[?12l\u001b[?25h" - ], - [ - 0.027046, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[27;55H\u001b[1m\u001b[31m\u001b[106m{\u001b[29;21H}\u001b[m\u001b[93m\u001b[107m\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:51\u001b[27;55H\u001b[?12l\u001b[?25h" - ], - [ - 0.034344, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[26;57H\u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\u001b[27;55H{\u001b[29;21H}\u001b[32;17H\u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  58%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:53\u001b[26;57H\u001b[?12l\u001b[?25h" - ], - [ - 0.032004, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[32;17H} \u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  57%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:97\u001b[25;101H\u001b[?12l\u001b[?25h" - ], - [ - 0.031757, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[16;61H\u001b[1m\u001b[31m\u001b[106m{\u001b[24;17H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  56%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m69\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[24;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.027946, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[16;61H{\u001b[19;28H\u001b[1m\u001b[31m\u001b[106m{\u001b[23;21H}\u001b[m\u001b[93m\u001b[107m\u001b[24;17H}\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  55%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[23;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.032109, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[19;28H{\u001b[20;51H\u001b[1m\u001b[31m\u001b[106m{\u001b[22;25H}\u001b[m\u001b[93m\u001b[107m\u001b[23;21H}\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  
54%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:21\u001b[22;25H\u001b[?12l\u001b[?25h" - ], - [ - 0.185364, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[20;51H{\u001b[22;25H}\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:97\u001b[21;101H\u001b[?12l\u001b[?25h" - ], - [ - 12.251553, - "\u001b[51;210H8\u001b[21;102H" - ], - [ - 0.640334, - "\u001b[51;210H9\u001b[21;103H" - ], - [ - 0.179298, - "\u001b[m\u001b[93m\u001b[107m\u001b[51;175H\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;181H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;189H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;194H\u001b[38;5;107m\u001b[48;5;240m \u001b[1C54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;201H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;203H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:100\u001b[21;104H" - ], - [ - 0.807758, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[34m-- INSERT --" - ], - [ - 0.042962, - "\u001b[m\u001b[93m\u001b[107m\u001b[51;1H\u001b[1m\u001b[38;5;23m\u001b[48;5;231m INSERT \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;231m\u001b[48;5;31m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m\u001b[51;12H kpod-format-table \u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;31m\u001b[51;32H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;31mrmi.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[m\u001b[93m\u001b[107m\u001b[51;49H\u001b[38;5;31m\u001b[48;5;24m\u001b[51;50H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;24m                                                                                                                            \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;181H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;189H\u001b[38;5;117m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;194H\u001b[38;5;186m\u001b[4" - ], - [ - 2.9e-05, - "8;5;31m  54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[51;201H\u001b[38;5;24m\u001b[48;5;117m \u001b[51;203H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;117m:100 \u001b[21;104H\u001b[?12l\u001b[?25h" - ], - [ - 0.145919, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;142H\u001b[K\u001b[51;48H\u001b[1m\u001b[38;5;220m\u001b[48;5;31m + 
\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[51;52H \u001b[m\u001b[93m\u001b[107m\u001b[122C\u001b[38;5;231m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;117m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;196H\u001b[38;5;186m\u001b[48;5;31m 54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;24m\u001b[48;5;117m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m6\b 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:99\u001b[21;103H\u001b[?12l\u001b[?25h" - ], - [ - 0.48937, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;141H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[21;102H\u001b[?12l\u001b[?25h" - ], - [ - 0.027128, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36musing its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;140H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[21;101H\u001b[?12l\u001b[?25h" - ], - [ - 0.02875, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\b\u001b[36m% using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;139H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[21;100H\u001b[?12l\u001b[?25h" - ], - [ - 0.031194, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;138H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[21;99H\u001b[?12l\u001b[?25h" - ], - [ - 0.02624, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36musing its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;137H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[21;98H\u001b[?12l\u001b[?25h" - ], - [ - 0.032683, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;136H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[21;97H\u001b[?12l\u001b[?25h" - ], - [ - 0.029086, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;135H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[21;96H\u001b[?12l\u001b[?25h" - ], - [ - 0.030766, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;134H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[21;95H\u001b[?12l\u001b[?25h" - ], - [ - 0.036114, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;133H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m0\u001b[21;94H\u001b[?12l\u001b[?25h" - ], - [ - 0.119662, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;132H\u001b[K\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m89\u001b[21;93H\u001b[?12l\u001b[?25h" - ], - [ - 0.150503, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference 
image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;131H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[21;92H\u001b[?12l\u001b[?25h" - ], - [ - 0.162647, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;130H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[21;91H\u001b[?12l\u001b[?25h" - ], - [ - 0.143355, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;129H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[21;90H\u001b[?12l\u001b[?25h" - ], - [ - 0.152961, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[21;128H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[21;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.214499, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mo using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[21;90H\u001b[?12l\u001b[?25h" - ], - [ - 0.118991, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mn using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[21;91H\u001b[?12l\u001b[?25h" - ], - [ - 0.211947, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36me using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[21;92H\u001b[?12l\u001b[?25h" - ], - [ - 0.109298, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m9\u001b[21;93H\u001b[?12l\u001b[?25h" - ], - [ - 0.165278, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mo using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m90\u001b[21;94H\u001b[?12l\u001b[?25h" - ], - [ - 0.123492, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mr using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[21;95H\u001b[?12l\u001b[?25h" - ], - [ - 0.096682, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[21;96H\u001b[?12l\u001b[?25h" - ], - [ - 0.08435, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mm using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[21;97H\u001b[?12l\u001b[?25h" - ], - [ - 0.130601, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mo using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[21;98H\u001b[?12l\u001b[?25h" - ], - [ - 0.052001, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mr using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[21;99H\u001b[?12l\u001b[?25h" - ], - [ - 0.075667, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36me using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[21;100H\u001b[?12l\u001b[?25h" - ], - [ - 0.06988, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m using its reference 
image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[21;101H\u001b[?12l\u001b[?25h" - ], - [ - 0.107312, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mc using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[21;102H\u001b[?12l\u001b[?25h" - ], - [ - 0.052857, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mo using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m9\u001b[21;103H\u001b[?12l\u001b[?25h" - ], - [ - 0.080952, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mn using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;175H\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;181H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;189H\u001b[38;5;117m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;194H\u001b[38;5;186m\u001b[48;5;31m \u001b[1C54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[51;201H\u001b[38;5;24m\u001b[48;5;117m \u001b[51;203H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:100\u001b[21;104H\u001b[?12l\u001b[?25h" - ], - [ - 0.106035, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mt using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[21;105H\u001b[?12l\u001b[?25h" - ], - [ - 0.069849, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36ma using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[21;106H\u001b[?12l\u001b[?25h" - ], - [ - 0.080188, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mi using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[21;107H\u001b[?12l\u001b[?25h" - ], - [ - 0.053983, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mn using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[21;108H\u001b[?12l\u001b[?25h" - ], - [ - 0.083169, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36me using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[21;109H\u001b[?12l\u001b[?25h" - ], - [ - 0.064025, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mr using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[21;110H\u001b[?12l\u001b[?25h" - ], - [ - 0.114373, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36ms using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[21;111H\u001b[?12l\u001b[?25h" - ], - [ - 0.140141, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[21;112H\u001b[?12l\u001b[?25h" - ], - [ - 0.178511, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36ma using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, 
ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m9\u001b[21;113H\u001b[?12l\u001b[?25h" - ], - [ - 0.078589, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mr using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m10\u001b[21;114H\u001b[?12l\u001b[?25h" - ], - [ - 0.088307, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36me using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[21;115H\u001b[?12l\u001b[?25h" - ], - [ - 0.270773, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K" - ], - [ - 0.007389, - "\u001b[51;1H\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;31m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H kpod-format-table \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;32H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[m\u001b[93m\u001b[107m\u001b[51;51H\u001b[38;5;240m\u001b[48;5;236m\u001b[51;52H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                          \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;181H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;189H\u001b[38;5;247m\u001b[48;5;236m" - ], - [ - 3.4e-05, - " go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;194H\u001b[38;5;107m\u001b[48;5;240m  54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;201H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;203H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;252m:110 \u001b[21;114H\u001b[?12l\u001b[?25h" - ], - [ - 0.232519, - "\u001b[?25l\u001b[52;1H\u001b[m\u001b[93m\u001b[107m:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.143782, - "w\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.064041, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.737175, - "\r\u001b[?25l\u001b[?2004l" - ], - [ - 0.022609, - "\"cmd/kpod/rmi.go\"" - ], - [ - 0.010737, - " 123L, 3107C written" - ], - [ - 0.01326, - "\r\r\r\n\u001b[39;49m\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.002223, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.022486, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table* \u001b[39m \u001b[33m54s\u001b[39m\r\n" - ], - [ - 0.001213, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 8.7e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.3e-05, - 
"\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000306, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.1e-05, - "\u001b[?1h\u001b=" - ], - [ - 2e-05, - "\u001b[?2004h" - ], - [ - 4.650947, - "s" - ], - [ - 0.114659, - "\bsu" - ], - [ - 0.136564, - "d" - ], - [ - 0.088137, - "o" - ], - [ - 0.111636, - " " - ], - [ - 0.12095, - "m" - ], - [ - 0.271219, - "\b \b" - ], - [ - 0.500424, - "\b" - ], - [ - 0.030636, - "\b \b" - ], - [ - 0.03127, - "\b \b" - ], - [ - 0.031598, - "\b\bs \b" - ], - [ - 0.029362, - "\b \b" - ], - [ - 0.153403, - "m" - ], - [ - 0.127382, - "\bma" - ], - [ - 0.14863, - "k" - ], - [ - 0.131117, - "e" - ], - [ - 0.088373, - " " - ], - [ - 0.064223, - "k" - ], - [ - 0.087523, - "p" - ], - [ - 0.096297, - "o" - ], - [ - 0.119679, - "d" - ], - [ - 0.112054, - "\u001b[?1l\u001b>" - ], - [ - 0.000159, - "\u001b[?2004l\r\r\n" - ], - [ - 0.00324, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 6.788264, - "go build -ldflags '-X main.gitCommit=99495909 -X main.buildInfo=1502916531' -tags \"selinux seccomp \" -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod\r\n" - ], - [ - 4.283762, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.017721, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table* \u001b[39m \u001b[33m11s\u001b[39m\r\n" - ], - [ - 0.001236, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000128, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.00018, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 3.3e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 8e-05, - "\u001b[?1h\u001b=" - ], - [ - 3.1e-05, - "\u001b[?2004h" - ], - [ - 168.290484, - "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 2.584789, - "g" - ], - [ - 0.198102, - "\bgi" - ], - [ - 0.19998, - "t" - ], - [ - 0.600599, - "\b \b" - ], - [ - 0.127993, - "\b\bg \b" - ], - [ - 0.175966, - "\b \b" - ], - [ - 0.216044, - "s" - ], - [ - 0.112522, - "\bsu" - ], - [ - 0.135935, - "d" - ], - [ - 0.079322, - "o" - ], - [ - 0.14454, - " " - ], - [ - 0.079601, - "m" - ], - [ - 0.128569, - "a" - ], - [ - 0.08771, - "k" - ], - [ - 0.09614, - "e" - ], - [ - 0.095448, - " " - ], - [ - 0.056775, - "i" - ], - [ - 0.063421, - "n" - ], - [ - 0.072685, - "s" - ], - [ - 0.111636, - "t" - ], - [ - 0.080215, - "a" - ], - [ - 0.119661, - "l" - ], - [ - 0.135786, - "l" - ], - [ - 0.296278, - "\u001b[?1l\u001b>" - ], - [ - 8.3e-05, - "\u001b[?2004l\r\r\n" - ], - [ - 0.00983, - "\u001b]2;sudo make install\u0007\u001b]1;make\u0007" - ], - [ - 0.947581, - "[sudo] password for ryan: " - ], - [ - 2.602557, - "\r\n" - ], - [ - 2.991429, - "install -D -m 755 crio /usr/local/bin/crio\r\n" - ], - [ - 0.053259, - "install -D -m 755 crioctl /usr/local/bin/crioctl\r\n" - ], - [ - 0.030024, - "install -D -m 755 kpod /usr/local/bin/kpod\r\n" - ], - [ - 0.041823, - "install -D -m 755 conmon/conmon /usr/local/libexec/crio/conmon\r\n" - ], - [ - 0.002995, - "install -D -m 755 pause/pause /usr/local/libexec/crio/pause\r\n" - ], - [ - 0.005043, - "install -d -m 755 /usr/local/share/man/man1\r\n" - ], - [ - 0.001982, - "install -d -m 755 /usr/local/share/man/man5\r\n" - ], - [ - 0.000749, - "install -d -m 755 
/usr/local/share/man/man8\r\n" - ], - [ - 0.000879, - "install -m 644 docs/kpod-diff.1 docs/kpod-push.1 docs/kpod-cp.1 docs/kpod.1 docs/kpod-export.1 docs/kpod-load.1 docs/kpod-logs.1 docs/kpod-images.1 docs/kpod-umount.1 docs/kpod-save.1 docs/kpod-mount.1 docs/kpod-info.1 docs/kpod-inspect.1 docs/kpod-history.1 docs/kpod-pull.1 docs/kpod-rmi.1 docs/kpod-version.1 docs/kpod-tag.1 -t /usr/local/share/man/man1\r\n" - ], - [ - 0.011056, - "install -m 644 docs/crio.conf.5 -t /usr/local/share/man/man5\r\n" - ], - [ - 0.002986, - "install -m 644 docs/crio.8 -t /usr/local/share/man/man8\r\n" - ], - [ - 0.004783, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.019387, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table* \u001b[39m \u001b[33m7s\u001b[39m\r\n" - ], - [ - 0.002291, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 9.3e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.1e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 8.8e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 4.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.6e-05, - "\u001b[?2004h" - ], - [ - 15.331424, - "g" - ], - [ - 0.398976, - "\b \b" - ], - [ - 0.536701, - "v" - ], - [ - 0.160554, - "\bvi" - ], - [ - 0.127435, - " " - ], - [ - 1.040078, - "\b" - ], - [ - 0.144093, - "\b\bv \b" - ], - [ - 0.167843, - "\b \b" - ], - [ - 0.448464, - "g" - ], - [ - 0.087315, - "\bgi" - ], - [ - 0.104665, - "t" - ], - [ - 0.119248, - " " - ], - [ - 0.176373, - "r" - ], - [ - 0.151944, - "e" - ], - [ - 0.080321, - "s" - ], - [ - 0.175774, - "e" - ], - [ - 0.120008, - "t" - ], - [ - 0.168118, - " " - ], - [ - 0.647723, - "c" - ], - [ - 0.11246, - "m" - ], - [ - 0.353225, - "\u0007" - ], - [ - 0.0001, - "k" - ], - [ - 0.225653, - "\u0007" - ], - [ - 0.388813, - "\b \b" - ], - [ - 0.152155, - "d" - ], - [ - 0.225421, - "\u0007" - ], - [ - 0.142048, - "m" - ], - [ - 0.448625, - "\b \b" - ], - [ - 0.248141, - "/" - ], - [ - 0.303478, - "m" - ], - [ - 0.415244, - "\b \b" - ], - [ - 0.501387, - "\b \b" - ], - [ - 0.030592, - "\b \b" - ], - [ - 0.031729, - "\b \b" - ], - [ - 0.276247, - "\b \b" - ], - [ - 0.296245, - "H" - ], - [ - 0.111636, - "E" - ], - [ - 0.128194, - "A" - ], - [ - 0.160328, - "D" - ], - [ - 0.216523, - " " - ], - [ - 0.160304, - "m" - ], - [ - 0.159999, - "e" - ], - [ - 0.22375, - "\b \b" - ], - [ - 0.159675, - "\b \b" - ], - [ - 0.088274, - "c" - ], - [ - 0.111635, - "m" - ], - [ - 0.178731, - "\u0007" - ], - [ - 0.05329, - "k" - ], - [ - 0.196815, - "\u0007" - ], - [ - 0.282938, - "\b \b" - ], - [ - 0.136923, - "d" - ], - [ - 0.239219, - "/" - ], - [ - 0.368537, - "c" - ], - [ - 0.319505, - "\b \b" - ], - [ - 0.232433, - "k" - ], - [ - 0.504033, - "p" - ], - [ - 0.176414, - "o" - ], - [ - 0.126785, - "d" - ], - [ - 0.864534, - "/" - ], - [ - 0.155176, - "r" - ], - [ - 0.109177, - "m" - ], - [ - 0.119347, - "i" - ], - [ - 0.24468, - "." 
- ], - [ - 0.195895, - "g" - ], - [ - 0.087415, - "o" - ], - [ - 1.588076, - "\u001b[?1l\u001b>" - ], - [ - 0.000131, - "\u001b[?2004l\r\r\n" - ], - [ - 0.003239, - "\u001b]2;git reset HEAD cmd/kpod/rmi.go\u0007\u001b]1;git\u0007" - ], - [ - 0.005443, - "Unstaged changes after reset:\r\nM\tcmd/kpod/images.go\r\n" - ], - [ - 0.000165, - "M\tcmd/kpod/rmi.go\r\n" - ], - [ - 0.010191, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.023696, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001223, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000118, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.3e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000101, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 3.3e-05, - "\u001b[?2004h" - ], - [ - 117.912011, - "v" - ], - [ - 0.072168, - "\bvi" - ], - [ - 0.13525, - " " - ], - [ - 0.224662, - "m" - ], - [ - 0.447667, - "\b \b" - ], - [ - 0.119783, - "c" - ], - [ - 0.096089, - "m" - ], - [ - 0.146517, - "d\u001b[1m/\u001b[0m" - ], - [ - 0.10283, - "\b\u001b[0m/k" - ], - [ - 0.110527, - "pod\u001b[1m/\u001b[0m" - ], - [ - 0.48823, - "\b\u001b[0m/r" - ], - [ - 0.176379, - "m" - ], - [ - 0.10899, - "i.go\u001b[1m \u001b[0m" - ], - [ - 0.387068, - "\b\u001b[0m \b" - ], - [ - 0.000493, - "\u001b[?1l\u001b>\u001b[?2004l\r\r\n" - ], - [ - 0.004413, - "\u001b]2;vim cmd/kpod/rmi.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.139925, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000574, - "\u001b[1;54r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[54;1H\"cmd/kpod/rmi.go\"" - ], - [ - 0.000165, - " 123L, 3107C" - ], - [ - 0.009214, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.003104, - "\u001b[1;1H\u001b[96m\u001b[47m 18 \u001b[m\u001b[93m\u001b[107m\u001b[8C},\r\n\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m rmiCommand = cli.Command{\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[8CName:\u001b[8C\u001b[36m\"rmi\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[8CUsage:\u001b[7C\u001b[36m\"removes one or more images from local storage\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[8CDescription: rmiDescription,\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8CAction: rmiCmd,\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m\u001b[8CArgsUsage: \u001b[36m\"IMAGE-NAME-OR-ID [...]\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m\u001b[8CFlags:\u001b[7CrmiFlags,\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 28 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 29 \r\n 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m rmiCmd(c *cli.Context) \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 31 \r\n 32 \u001b[m\u001b[93m\u001b[107m force := \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m 
\u001b[32mif\u001b[m\u001b[93m\u001b[107m c.IsSet(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m\u001b[8Cforce = c.Bool(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 35 \u001b[m\u001b[93m\u001b[107m " - ], - [ - 1.9e-05, - "}\r\n\u001b[96m\u001b[47m 36 \r\n 37 \u001b[m\u001b[93m\u001b[107m args := c.Args()\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(args) == \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Errorf(\u001b[36m\"image name or ID must be specified\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 41 \r\n 42 \u001b[m\u001b[93m\u001b[107m config, err := getConfig(c)\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Could not get config\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m store, err := getStore(config)\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 50 \r\n 51 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m _, id := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m args {\r\n\u001b[96m\u001b[47m 52 " - ], - [ - 0.032096, - "\u001b[m\u001b[93m\u001b[107m\u001b[8Cimage, err := libkpodimage.FindImage(store, id)\r\n\u001b[96m\u001b[47m 53 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 54 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"could not get image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 55 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 56 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m image != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 57 \u001b[m\u001b[93m\u001b[107m\u001b[12CctrIDs, err := runningContainers(image, store)\r\n\u001b[96m\u001b[47m 58 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 59 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"error getting running containers for image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 60 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 61 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(ctrIDs) > 
\u001b[36m0\u001b[m\u001b[93m\u001b[107m && \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(image.Names) <= \u001b[36m1\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 62" - ], - [ - 9.1e-05, - " \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m force {\r\n\u001b[96m\u001b[47m 63 \u001b[m\u001b[93m\u001b[107m\u001b[20CremoveContainers(ctrIDs, store)\r\n\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m\u001b[16C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mfor\u001b[m\u001b[93m\u001b[107m ctrID := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\r\n\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m\u001b[24C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m fmt.Errorf(\u001b[36m\"Could not remove image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m (\u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[33mmust force\u001b[m\u001b[93m\u001b[107m\u001b[36m) - one or more containers are using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\r\n\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m\u001b[16C}\r\n\u001b[96m\u001b[47m 69 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[53;12H kpod-format-table \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[53;32H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;220m\u001b[48;5;240m M \u001b[m\u001b[93m" - ], - [ - 0.00914, - "\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[53;52H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                           \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:25 \u001b[49;29H\u001b[?12l\u001b[?25h\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 1.315626, - "\u001b[?25l\u001b[53;210H6\u001b[49;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.490663, - "\u001b[53;210H7\u001b[49;31H" - ], - [ - 0.024219, - "\u001b[53;210H8\u001b[49;32H" - ], - [ - 0.030955, - "\u001b[53;210H9\u001b[49;33H" - ], - [ - 0.03195, - "\u001b[53;209H30\u001b[49;34H" - ], - [ - 0.028457, - "\u001b[53;210H1\u001b[49;35H" - ], - [ - 0.030479, - "\u001b[53;210H2\u001b[49;36H" - ], - 
[asciinema recording: the remainder of this file is a JSON event stream of
[delay, terminal-output] pairs; the raw escape sequences are unreadable in
this view, so only the recoverable session transcript is kept below.]

❯ vim cmd/kpod/rmi.go
# insert-mode edits on line 66 of rmiCmd, reworking the error string into
# "... one or more containers are using its reference image"; the buffer is
# abandoned with :q! rather than written

❯ git status
On branch kpod-format-table
Changes not staged for commit:
  (use "git add <file>..." to update what will be committed)
  (use "git checkout -- <file>..." to discard changes in working directory)

	modified:   cmd/kpod/images.go
	modified:   cmd/kpod/rmi.go

no changes added to commit (use "git add" and/or "git commit -a")
"\u001b[?1h\u001b=" - ], - [ - 2.9e-05, - "\u001b[?2004h" - ], - [ - 5.901847, - "g" - ], - [ - 0.160227, - "\bgi" - ], - [ - 0.104667, - "t" - ], - [ - 0.055222, - " " - ], - [ - 0.127953, - "d" - ], - [ - 0.11279, - "i" - ], - [ - 0.078928, - "f" - ], - [ - 0.153039, - "f" - ], - [ - 0.1441, - "\u001b[?1l\u001b>" - ], - [ - 0.000244, - "\u001b[?2004l\r\r\n" - ], - [ - 0.005357, - "\u001b]2;git diff\u0007\u001b]1;git\u0007" - ], - [ - 0.005163, - "\u001b[?1049h\u001b[?1h\u001b=\r" - ], - [ - 0.003303, - "\u001b[1mdiff --git a/cmd/kpod/images.go b/cmd/kpod/images.go\u001b[m\u001b[m\r\n\u001b[1mindex 593c2b27..d6f2bda1 100644\u001b[m\u001b[m\r\n\u001b[1m--- a/cmd/kpod/images.go\u001b[m\u001b[m\r\n\u001b[1m+++ b/cmd/kpod/images.go\u001b[m\u001b[m\r\n\u001b[36m@@ -120,7 +120,7 @@\u001b[m \u001b[mfunc genImagesFormat(quiet, truncate, digests bool) (format string) {\u001b[m\u001b[m\r\n format += \"{{ .Name | printf \\\"%-56s\\\" }} \"\u001b[m\u001b[m\r\n \u001b[m\u001b[m\r\n if digests {\u001b[m\u001b[m\r\n\u001b[31m- format += \"{{ .DIGEST | printf \\\"%-71s \\\"}} \"\u001b[m\u001b[m\r\n\u001b[32m+\u001b[m \u001b[32mformat += \"{{ .Digest | printf \\\"%-71s \\\"}} \"\u001b[m\u001b[m\r\n }\u001b[m\u001b[m\r\n \u001b[m\u001b[m\r\n format += \"{{ .CreatedAt | printf \\\"%-22s\\\" }} {{.Size}}\"\u001b[m\u001b[m\r\n\u001b[1mdiff --git a/cmd/kpod/rmi.go b/cmd/kpod/rmi.go\u001b[m\u001b[m\r\n\u001b[1mindex c7752fc1..a8da7da6 100644\u001b[m\u001b[m\r\n\u001b[1m--- a/cmd/kpod/rmi.go\u001b[m\u001b[m\r\n\u001b[1m+++ b/cmd/kpod/rmi.go\u001b[m\u001b[m\r\n\u001b[36m@@ -63,7 +63,7 @@\u001b[m \u001b[mfunc rmiCmd(c *cli.Context) error {\u001b[m\u001b[m\r\n removeContainers(ctrIDs, store)\u001b[m\u001b[m\r\n } else {\u001b[m\u001b[m\r\n " - ], - [ - 2.9e-05, - " " - ], - [ - 0.000128, - " for ctrID := range ctrIDs {\u001b[m\u001b[m\r\n\u001b[31m- return fmt.Errorf(\"Could not remove image %q (must force) - container %q is using its reference image\", id, ctrID)\u001b[m\u001b[m\r\n\u001b[32m+\u001b[m \u001b[32mreturn fmt.Errorf(\"Could not remove image %q (must force) - one or more containers are using its reference image\", id, ctrID)\u001b[m\u001b[m\r\n }\u001b[m\u001b[m\r\n }\u001b[m\u001b[m\r\n }\u001b[m\u001b[m\r\n" - ], - [ - 2.2e-05, - "\u001b[7m(END)\u001b[27m\u001b[K" - ], - [ - 3.085748, - "\r\u001b[K\u001b[?1l\u001b>\u001b[?1049l" - ], - [ - 0.001753, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.026198, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.00165, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000111, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.8e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 9e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.8e-05, - "\u001b[?1h\u001b=" - ], - [ - 4.6e-05, - "\u001b[?2004h" - ], - [ - 0.198452, - "g" - ], - [ - 0.095541, - "\bgi" - ], - [ - 0.120141, - "t" - ], - [ - 0.087239, - " " - ], - [ - 0.295807, - "r" - ], - [ - 0.124741, - "e" - ], - [ - 0.107908, - "s" - ], - [ - 0.136109, - "e" - ], - [ - 0.099662, - "t" - ], - [ - 0.123811, - " " - ], - [ - 0.112331, - "-" - ], - [ - 0.143709, - "-" - ], - [ - 0.220177, - "h" - ], - [ - 0.084927, - "a" - ], - [ - 0.050956, - "r" - ], - [ - 0.116765, - "d" - ], - [ - 0.042884, - " " 
- ], - [ - 0.239721, - "H" - ], - [ - 0.068192, - "E" - ], - [ - 0.148337, - "A" - ], - [ - 0.280701, - "D\u001b[1m \u001b[0m" - ], - [ - 0.689919, - "\b\u001b[0m \b\u001b[?1l\u001b>\u001b[?2004l\r\r\n" - ], - [ - 0.004057, - "\u001b]2;git reset --hard HEAD\u0007\u001b]1;git\u0007" - ], - [ - 0.023549, - "HEAD is now at 99495909 Make kpod images use text/template by default\r\n" - ], - [ - 0.000466, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.027615, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001079, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 9.4e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.6e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 8.1e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.7e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.3e-05, - "\u001b[?2004h" - ], - [ - 0.377121, - "v" - ], - [ - 0.083308, - "\bvi" - ], - [ - 0.079468, - " " - ], - [ - 0.080413, - "c" - ], - [ - 0.088276, - "m" - ], - [ - 0.144748, - "d\u001b[1m/\u001b[0m" - ], - [ - 0.074972, - "\b\u001b[0m/k" - ], - [ - 0.152342, - "pod\u001b[1m/\u001b[0m" - ], - [ - 0.396108, - "\b\u001b[0m/i" - ], - [ - 0.079857, - "m" - ], - [ - 0.074579, - "ages.go\u001b[1m \u001b[0m" - ], - [ - 0.548619, - "\b\u001b[0m \b" - ], - [ - 0.000101, - "\u001b[?1l\u001b>" - ], - [ - 0.000148, - "\u001b[?2004l\r\r\n" - ], - [ - 0.001987, - "\u001b]2;vim cmd/kpod/images.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.161796, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000788, - "\u001b[1;54r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[54;1H\"cmd/kpod/images.go\"" - ], - [ - 7.9e-05, - " 203L, 4796C" - ], - [ - 0.011442, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.008169, - "\u001b[1;1H\u001b[96m\u001b[47m152 \u001b[m\u001b[93m\u001b[107m\u001b[12CName: name,\r\n\u001b[96m\u001b[47m153 \u001b[m\u001b[93m\u001b[107m\u001b[12CDigest: imageDigest,\r\n\u001b[96m\u001b[47m154 \u001b[m\u001b[93m\u001b[107m\u001b[12CCreatedAt: createdTime.Format(\u001b[36m\"Jan 2, 2006 15:04\"\u001b[m\u001b[93m\u001b[107m),\r\n\u001b[96m\u001b[47m155 \u001b[m\u001b[93m\u001b[107m\u001b[12CSize: libkpodimage.FormattedSize(size),\r\n\u001b[96m\u001b[47m156 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m157 \u001b[m\u001b[93m\u001b[107m\u001b[8CimageOutput = \u001b[32mappend\u001b[m\u001b[93m\u001b[107m(imageOutput, params)\r\n\u001b[96m\u001b[47m158 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m159 \r\n160 \u001b[m\u001b[93m\u001b[107m \u001b[32mvar\u001b[m\u001b[93m\u001b[107m out formats.Writer\r\n\u001b[96m\u001b[47m161 \r\n162 \u001b[m\u001b[93m\u001b[107m \u001b[32mswitch\u001b[m\u001b[93m\u001b[107m outputFormat {\r\n\u001b[96m\u001b[47m163 \u001b[m\u001b[93m\u001b[107m \u001b[32mcase\u001b[m\u001b[93m\u001b[107m \u001b[36m\"json\"\u001b[m\u001b[93m\u001b[107m:\r\n\u001b[96m\u001b[47m164 \u001b[m\u001b[93m\u001b[107m\u001b[8Cout = formats.JSONstruct{Output: toGeneric(imageOutput)}\r\n\u001b[96m\u001b[47m165 \u001b[m\u001b[93m\u001b[107m \u001b[32mdefault\u001b[m\u001b[93m\u001b[107m:\r\n\u001b[96m\u001b[47m166 \u001b[m\u001b[93m\u001b[107m\u001b[8Cout = 
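The `.DIGEST` → `.Digest` fix in the diff above matters because Go's
text/template resolves `{{ .Field }}` against the exact exported field name of
the data struct; lookups are not case-folded, so `.DIGEST` fails at execute
time when the field is `Digest`. A minimal standalone sketch (not the kpod
code; `row` is a made-up stand-in type):

```go
package main

import (
	"os"
	"text/template"
)

// row is a hypothetical stand-in for a struct like imageOutputParams.
type row struct {
	Digest string
}

func main() {
	data := row{Digest: "sha256:abc123"}

	// Field name matches exactly: renders the digest, left-padded to 71 columns.
	ok := template.Must(template.New("ok").Parse("{{ .Digest | printf \"%-71s\" }}\n"))
	_ = ok.Execute(os.Stdout, data)

	// Wrong case: text/template does no case folding, so execution errors out
	// ("can't evaluate field DIGEST in type main.row").
	bad := template.Must(template.New("bad").Parse("{{ .DIGEST }}\n"))
	if err := bad.Execute(os.Stdout, data); err != nil {
		println("template error:", err.Error())
	}
}
```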
❯ vi cmd/kpod/images.go
"cmd/kpod/images.go" 203L, 4796C
# the visible buffer (lines 152-203, then 98-149 after searching) includes the
# text/template output plumbing this branch adds:

	var out formats.Writer

	switch outputFormat {
	case "json":
		out = formats.JSONstruct{Output: toGeneric(imageOutput)}
	default:
		out = formats.StdoutTemplate{Output: toGeneric(imageOutput), Template: outputFormat, Fields: imageOutput[0].headerMap()}
	}

	formats.Writer(out).Out()

	type imageOutputParams struct {
		ID        string        `json:"id"`
		Name      string        `json:"names"`
		Digest    digest.Digest `json:"digest"`
		CreatedAt string        `json:"created"`
		Size      string        `json:"size"`
	}

	func toGeneric(params []imageOutputParams) []interface{} {
		genericParams := make([]interface{}, len(params))
		for i, v := range params {
			genericParams[i] = interface{}(v)
		}
		return genericParams
	}

	func (i *imageOutputParams) headerMap() map[string]string {
		v := reflect.Indirect(reflect.ValueOf(i))
		values := make(map[string]string)

		for i := 0; i < v.NumField(); i++ {
			key := v.Type().Field(i).Name
			value := key
			if value == "ID" || value == "Name" {
				value = "Image" + value
			}
			values[key] = fmt.Sprintf("%s ", strings.ToUpper(splitCamelCase(value)))
		}
		return values
	}

# /DIGEST jumps to line 123 ("search hit BOTTOM, continuing at TOP"):
#	format += "{{ .DIGEST | printf \"%-71s \"}} "
# and the recording retypes DIGEST as Digest:
#	format += "{{ .Digest | printf \"%-71s \"}} "
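The `headerMap` method shown in the buffer derives display headers from the
struct's field names by reflection, so the template `Fields` stay in sync with
`imageOutputParams` automatically. A self-contained sketch of the same
pattern, under the assumption that `splitCamelCase` simply inserts spaces
before interior capitals (the real helper lives elsewhere in cmd/kpod):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Stand-in for kpod's imageOutputParams (JSON tags omitted for brevity).
type imageOutputParams struct {
	ID        string
	Name      string
	Digest    string
	CreatedAt string
	Size      string
}

// splitCamelCase is a simplified stand-in for the kpod helper: it inserts a
// space before each interior upper-case letter ("CreatedAt" -> "Created At").
func splitCamelCase(s string) string {
	out := ""
	for i, r := range s {
		if i > 0 && r >= 'A' && r <= 'Z' && s[i-1] >= 'a' && s[i-1] <= 'z' {
			out += " "
		}
		out += string(r)
	}
	return out
}

// headerMap mirrors the method in the buffer: walk the struct fields with
// reflect and map each field name to an upper-cased display header, with ID
// and Name prefixed so they render as IMAGE ID / IMAGE NAME.
func headerMap(p *imageOutputParams) map[string]string {
	v := reflect.Indirect(reflect.ValueOf(p))
	values := make(map[string]string)
	for i := 0; i < v.NumField(); i++ {
		key := v.Type().Field(i).Name
		value := key
		if value == "ID" || value == "Name" {
			value = "Image" + value
		}
		values[key] = strings.ToUpper(splitCamelCase(value))
	}
	return values
}

func main() {
	fmt.Println(headerMap(&imageOutputParams{}))
	// map[CreatedAt:CREATED AT Digest:DIGEST ID:IMAGE ID Name:IMAGE NAME Size:SIZE]
}
```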
# :wq
"cmd/kpod/images.go" 203L, 4796C written

❯ git commit -a --amend
# COMMIT_EDITMSG (15L, 455C):
#
#   Make kpod images use text/template by default
#
#   Signed-off-by: Ryan Cole
#
#   # Date: Tue Aug 15 22:23:40 2017 -0400
#   # On branch kpod-format-table
#   # Changes to be committed:
#   #	modified:   cmd/kpod/formats/formats.go
#   #	modified:   cmd/kpod/formats/templates.go
#   #	modified:   cmd/kpod/images.go
[kpod-format-table ba07bfb9] Make kpod images use text/template by default
 Date: Tue Aug 15 22:23:40 2017 -0400
 3 files changed, 36 insertions(+), 61 deletions(-)

❯ git push -f origin kpod-format-table
Counting objects: 8, done.
Delta compression using up to 4 threads.
Compressing objects: 100% (8/8), done.
Writing objects: 100% (8/8), 1.02 KiB | 1.02 MiB/s, done.
Total 8 (delta 6), reused 0 (delta 0)
remote: Resolving deltas: 100% (6/6), completed with 6 local objects.
To github.com:14rcole/cri-o
 + 99495909...ba07bfb9 kpod-format-table -> kpod-format-table (forced update)
❯ git checkout kpod-stats
Switched to branch 'kpod-stats'
Your branch is up-to-date with 'origin/kpod-stats'.

❯ vi libkpod/container_server.go
"libkpod/container_server.go" 684L, 20532C
# the buffer (lines 586-637) shows the ContainerServer state accessors:

	// listContainers returns a list of all containers stored by the server state
	func (c *ContainerServer) listContainers() []*oci.Container {
		c.stateLock.Lock()
		defer c.stateLock.Unlock()
		return c.state.containers.List()
	}

	// ListContainers returns a list of all containers stored by the server state that match the given filter function
	func (c *ContainerServer) ListContainers(filters ...func(*oci.Container) bool) ([]*oci.Container, error) {
		containers := c.listContainers()
		if len(filters) == 0 {
			return containers, nil
		}
		filteredContainers := make([]*oci.Container, 0, len(containers))
		for _, container := range containers {
			for _, filter := range filters {
				if filter(container) {
					filteredContainers = append(filteredContainers, container)
				}
			}
		}
		return filteredContainers, nil
	}

	// AddSandbox adds a sandbox to the sandbox state store
	func (c *ContainerServer) AddSandbox(sb *sandbox.Sandbox) {
		c.stateLock.Lock()
		defer c.stateLock.Unlock()
		c.state.sandboxes[sb.ID()] = sb
	}

	// GetSandbox returns a sandbox by its ID
	func (c *ContainerServer) GetSandbox(id string) *sandbox.Sandbox {
		c.stateLock.Lock()
		defer c.stateLock.Unlock()
		return c.state.sandboxes[id]
	}

	// GetSandboxContainer returns a sandbox's infra container
	func (c *ContainerServer) GetSandboxContainer(id string) *oci.Container {
		c.stateLock.Lock()
		defer c.stateLock.Unlock()
		sb, ok := c.state.sandboxes[id]
		if !ok {
			return nil
		}
# (screen listing cut off here in the recording)
\u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m611\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13 \u001b[26;17H\u001b[?12l\u001b[?25h" - ], - [ - 2e-05, - "\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 3.500046, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[K\u001b[54;1H/\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.32149, - "L\u001b[?25l" - ], - [ - 0.014673, - "\u001b[34;16H\u001b[7m\u001b[91mL\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[53;2H\u001b[1m\u001b[38;5;22m\u001b[48;5;148mCOMMND \u001b[m\u001b[93m\u001b[107m\u001b[186C\u001b[38;5;247m\u001b[48;5;240m  90%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:12\r\n\u001b[m\u001b[93m\u001b[107m/L\u001b[?12l\u001b[?25h" - ], - [ - 0.16782, - "i\u001b[?25l" - ], - [ - 0.004265, - "\u001b[1;1H\u001b[96m\u001b[47m609\u001b[m\u001b[93m\u001b[107m\u001b[5C \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter(container) \u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m610\u001b[m\u001b[93m\u001b[107m\u001b[5C filteredContainers = \u001b[32mappend\u001b[m\u001b[93m\u001b[107m(filteredContainers, container)\r\n\u001b[96m\u001b[47m611\u001b[m\u001b[93m\u001b[107m\u001b[5C \u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\u001b[3;18H\u001b[K\u001b[4;1H\u001b[96m\u001b[47m612\u001b[m\u001b[93m\u001b[107m\u001b[5C }\u001b[4;14H\u001b[K\u001b[5;1H\u001b[96m\u001b[47m613\u001b[m\u001b[93m\u001b[107m\u001b[5C}\u001b[5;10H\u001b[K\u001b[6;1H\u001b[96m\u001b[47m614\u001b[m\u001b[93m\u001b[107m\u001b[1C \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m filteredContainers, \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m615\u001b[m\u001b[93m\u001b[107m\u001b[1C}\r\n\u001b[96m\u001b[47m616\u001b[m\u001b[93m\u001b[107m\u001b[8;5H\u001b[K\u001b[9;1H\u001b[96m\u001b[47m617\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[96m// AddSandbox adds a sandbox to the sandbox state store\u001b[m\u001b[93m\u001b[107m\u001b[9;60H\u001b[K\u001b[10;1H\u001b[96m\u001b[47m618\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (c *ContainerServer) AddSandbox(sb *sandbox.Sandbox) {\r\n\u001b[96m\u001b[47m619\u001b[m\u001b[93m\u001b[107m\u001b[5Cc.stateLock.Lock()\u001b[11;27H\u001b[K\u001b[12;1H\u001b[96m\u001b[47m620\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[32mdefe\u001b[m\u001b[93m\u001b[107m\u001b[1C c.stateLock.Unlock()\u001b[12;35H\u001b[K\u001b[13;1H\u001b[96m\u001b[47m621\u001b[m\u001b[93m\u001b" - ], - [ - 5.4e-05, - "[107m\u001b[1C c.state.sandboxes[sb.ID()] = sb\r\n\u001b[96m\u001b[47m622\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[15;2H\u001b[96m\u001b[47m23\u001b[m\u001b[93m\u001b[107m\u001b[15;5H\u001b[K\u001b[16;2H\u001b[96m\u001b[47m24\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[96m// GetSandbox returns a sandbox by its ID\u001b[m\u001b[93m\u001b[107m\u001b[16;46H\u001b[K\u001b[17;2H\u001b[96m\u001b[47m25\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (c *ContainerServer) GetSandbox(id \u001b[33mstring\u001b[m\u001b[93m\u001b[107m) *sandbox.Sandbox 
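For readers who would rather not decode the frames, the `listContainers`/`ListContainers` pair the buffer displays reconstructs to the following Go (taken from the rendered text above; indentation and spacing are approximate):

```
// listContainers returns a list of all containers stored by the server state
func (c *ContainerServer) listContainers() []*oci.Container {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	return c.state.containers.List()
}

// ListContainers returns a list of all containers stored by the server state
// that match the given filter function
func (c *ContainerServer) ListContainers(filters ...func(*oci.Container) bool) ([]*oci.Container, error) {
	containers := c.listContainers()
	if len(filters) == 0 {
		return containers, nil
	}
	filteredContainers := make([]*oci.Container, 0, len(containers))
	for _, container := range containers {
		for _, filter := range filters {
			// Note: as rendered, a container is appended once per filter it matches.
			if filter(container) {
				filteredContainers = append(filteredContainers, container)
			}
		}
	}
	return filteredContainers, nil
}
```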
[asciinema recording, part 2 of 3: scrolling reveals the rest of the state helpers (`HasSandbox`, `RemoveSandbox`, `ListSandboxes`), each taking `c.stateLock` before touching `c.state`. A `/ListContainers` search wraps ("search hit BOTTOM, continuing at TOP") and lands on the doc comment at line 600; in insert mode the over-long comment is split onto a second `//` line, and `:wq` writes the file back as 685L, 20535C.]
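The sandbox helpers scrolled past in part 2 all share one shape: take `c.stateLock`, defer the unlock, then touch `c.state.sandboxes`. A reconstruction of three of them, again straight from the rendered frames (whitespace approximate):

```
// AddSandbox adds a sandbox to the sandbox state store
func (c *ContainerServer) AddSandbox(sb *sandbox.Sandbox) {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	c.state.sandboxes[sb.ID()] = sb
}

// GetSandboxContainer returns a sandbox's infra container
func (c *ContainerServer) GetSandboxContainer(id string) *oci.Container {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	sb, ok := c.state.sandboxes[id]
	if !ok {
		return nil
	}
	return sb.InfraContainer()
}

// RemoveSandbox removes a sandbox from the state store
func (c *ContainerServer) RemoveSandbox(id string) {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	delete(c.state.sandboxes, id)
}
```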
"\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000189, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 8.5e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000208, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000116, - "\u001b[?1h\u001b=" - ], - [ - 0.000101, - "\u001b[?2004h" - ], - [ - 11.704887, - "\u001b[?2004l\r\r\n" - ], - [ - 0.000651, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.025893, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats* \u001b[39m \u001b[33m29s\u001b[39m\r\n" - ], - [ - 0.001689, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 7.2e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.3e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 7.5e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000263, - "\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.604019, - ":" - ], - [ - 0.41525, - "\b \b" - ], - [ - 24.413263, - "v" - ], - [ - 0.123238, - "\bvi" - ], - [ - 0.096701, - " " - ], - [ - 0.103904, - "s" - ], - [ - 0.040009, - "e" - ], - [ - 0.056539, - "r" - ], - [ - 0.196367, - "ver\u001b[1m/\u001b[0m" - ], - [ - 0.158628, - "\b\u001b[0m/c" - ], - [ - 0.107515, - "o" - ], - [ - 0.072945, - "n" - ], - [ - 0.122494, - "\u0007" - ], - [ - 0.000289, - "\r\r\n" - ], - [ - 0.000116, - "\u001b[0mconfig.go container_create.go container_execsync.go container_portforward.go container_start.go container_stop.go \r\n\u001b[Jcontainer_attach.go \u001b[Jcontainer_exec.go \u001b[Jcontainer_list.go \u001b[Jcontainer_remove.go \u001b[Jcontainer_status.go \u001b[Jcontainer_updateruntimeconfig.go\u001b[J\u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\u001b[39m\r\u001b[2Cvi server/con\u001b[K\u001b[195C\u001b[90m\u001b[39m\u001b[39m\u001b[195D" - ], - [ - 0.477329, - "t" - ], - [ - 0.11174, - "a" - ], - [ - 0.211254, - "\r\r\n\u001b[J\u001b[A\u001b[17Ciner_" - ], - [ - 0.68805, - "l" - ], - [ - 0.124985, - "i" - ], - [ - 0.138637, - "st.go\u001b[1m \u001b[0m" - ], - [ - 0.548537, - "\b\u001b[0m \b" - ], - [ - 0.000176, - "\u001b[?1l\u001b>\u001b[?2004l\r\r\n\u001b[J" - ], - [ - 0.003295, - "\u001b]2;vim server/container_list.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.135184, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000569, - "\u001b[1;54r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[54;1H\"server/container_list.go\"" - ], - [ - 0.000171, - " 108L, 2729C" - ], - [ - 0.008118, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.001699, - "\u001b[1;1H\u001b[96m\u001b[47m 1 \u001b[m\u001b[93m\u001b[107m\u001b[32mpackage\u001b[m\u001b[93m\u001b[107m server\r\n\u001b[96m\u001b[47m 2 \r\n 3 \u001b[m\u001b[93m\u001b[107m\u001b[32mimport\u001b[m\u001b[93m\u001b[107m (\r\n\u001b[96m\u001b[47m 4 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"github.com/kubernetes-incubator/cri-o/oci\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 5 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"github.com/sirupsen/logrus\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 6 \u001b[m\u001b[93m\u001b[107m 
\u001b[36m\"golang.org/x/net/context\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 7 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"k8s.io/apimachinery/pkg/fields\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 8 \u001b[m\u001b[93m\u001b[107m pb \u001b[36m\"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 9 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 10 \r\n 11 \u001b[m\u001b[93m\u001b[107m\u001b[96m// filterContainer returns whether passed container matches filtering criteria\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 12 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m filterContainer(c *pb.Container, filter *pb.ContainerFilter) \u001b[33mbool\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 13 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 14 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[" - ], - [ - 1.6e-05, - "107m filter.State != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 15 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c.State != filter.State.State {\r\n\u001b[96m\u001b[47m 16 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 17 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 18 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.LabelSelector != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m\u001b[12Csel := fields.SelectorFromSet(filter.LabelSelector)\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m !sel.Matches(fields.Set(c.Labels)) {\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mtrue\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 28 \r\n 29 \u001b[m\u001b[93m\u001b[107m\u001b[96m// \u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m\u001b[96m lists all containers by filters." 
- ], - [ - 0.033458, - "\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (s *Server) \u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m(ctx context.Context, req *pb.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107mRequest) (*pb.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107mResponse, \u001b[33merror\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 31 \u001b[m\u001b[93m\u001b[107m logrus.Debugf(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m\u001b[36mRequest \u001b[m\u001b[93m\u001b[107m\u001b[31m%+v\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, req)\r\n\u001b[96m\u001b[47m 32 \u001b[m\u001b[93m\u001b[107m \u001b[32mvar\u001b[m\u001b[93m\u001b[107m ctrs []*pb.Container\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m filter := req.Filter\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m ctrList := s.ContainerServer.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m()\r\n\u001b[96m\u001b[47m 35 \r\n 36 \u001b[m\u001b[93m\u001b[107m \u001b[96m// Filter using container id and pod id first.\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 37 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.Id != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[12Cid, err := s.CtrI" - ], - [ - 3.2e-05, - "DIndex().Get(filter.Id)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 41 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m, err\r\n\u001b[96m\u001b[47m 42 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m\u001b[12Cc := s.ContainerServer.GetContainer(id)\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.PodSandboxId != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c.Sandbox() == filter.PodSandboxId {\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m\u001b[24CctrList = []*oci.Container{c}\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[20C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m\u001b[24CctrList = []*oci.Container{}\r\n\u001b[96m\u001b[47m 50 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 51 \r\n 52 \u001b[m\u001b[93m\u001b[107m\u001b[16C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m" - ], - [ - 0.009216, - "\u001b[53;12H kpod-stats \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[53;25H 
\u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mserver/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mcontainer_list.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[53;52H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                           \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   1%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m  1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1  \u001b[1;5H\u001b[?12l\u001b" - ], - [ - 2e-05, - "[?25h\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 0.527381, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   2%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[2;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.495163, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   3%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[3;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.025763, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   4%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:4\u001b[4;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.025962, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   5%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[5;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.035885, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   6%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[6;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.03159, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[7;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.027692, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   7%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[8;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.030541, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[3;12H\u001b[1m\u001b[31m\u001b[106m(\u001b[9;5H)\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   8%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[9;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.031712, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[3;12H(\u001b[9;5H)\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   9%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m10\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[10;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.031153, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  10%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[11;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.026845, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  11%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[12;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.030542, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  12%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:4\u001b[13;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.032914, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  13%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[14;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.032993, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  14%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[15;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.026882, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  15%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[16;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.032257, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  16%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[17;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.031522, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  17%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[18;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.288102, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  18%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[19;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.496038, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  
19%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m20\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[20;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.024987, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[21;8H" - ], - [ - 0.034118, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  20%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[22;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.029418, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  21%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[23;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.031726, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  22%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[24;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.026646, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  23%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[25;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.038387, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  24%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[26;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.02789, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;76H\u001b[1m\u001b[31m\u001b[106m{\u001b[27;5H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  25%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[27;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.027891, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;76H{\u001b[27;5H}\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  26%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[28;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.025499, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  27%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[29;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.032868, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  28%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m30\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[30;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.034132, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  29%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:4\u001b[31;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.734937, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  
28%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[30;5H" - ], - [ - 0.244963, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  29%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:4\u001b[31;8H" - ], - [ - 1.147152, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  30%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[32;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.507676, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  31%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[33;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.021376, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[34;8H" - ], - [ - 0.52311, - "\u001b[53;209H5\u001b[34;9H" - ], - [ - 0.501301, - "\u001b[53;209H6\u001b[34;10H" - ], - [ - 0.026664, - "\u001b[53;209H7\u001b[34;11H" - ], - [ - 0.029294, - "\u001b[53;209H8\u001b[34;12H" - ], - [ - 0.029763, - "\u001b[53;209H9\u001b[34;13H" - ], - [ - 0.029935, - "\u001b[53;209H10\u001b[34;14H" - ], - [ - 0.030764, - "\u001b[53;210H1\u001b[34;15H" - ], - [ - 0.028666, - "\u001b[53;210H2\u001b[34;16H" - ], - [ - 0.031328, - "\u001b[53;210H3\u001b[34;17H" - ], - [ - 0.031664, - "\u001b[53;210H4\u001b[34;18H" - ], - [ - 0.027038, - "\u001b[53;210H5\u001b[34;19H" - ], - [ - 0.033849, - "\u001b[53;210H6\u001b[34;20H" - ], - [ - 0.028053, - "\u001b[53;210H7\u001b[34;21H" - ], - [ - 0.031407, - "\u001b[53;210H8\u001b[34;22H" - ], - [ - 0.029045, - "\u001b[53;210H9\u001b[34;23H" - ], - [ - 0.031094, - "\u001b[53;209H20\u001b[34;24H" - ], - [ - 0.030714, - "\u001b[53;210H1\u001b[34;25H" - ], - [ - 0.030843, - "\u001b[53;210H2\u001b[34;26H" - ], - [ - 0.029335, - "\u001b[53;210H3\u001b[34;27H" - ], - [ - 0.03625, - "\u001b[53;210H4\u001b[34;28H" - ], - [ - 0.02287, - "\u001b[53;210H5\u001b[34;29H" - ], - [ - 0.031991, - "\u001b[53;210H6\u001b[34;30H" - ], - [ - 0.026648, - "\u001b[53;210H7\u001b[34;31H" - ], - [ - 0.032914, - "\u001b[53;210H8\u001b[34;32H" - ], - [ - 0.030082, - "\u001b[53;210H9\u001b[34;33H" - ], - [ - 0.03302, - "\u001b[53;209H30\u001b[34;34H" - ], - [ - 0.029673, - "\u001b[53;210H1\u001b[34;35H" - ], - [ - 0.029969, - "\u001b[53;210H2\u001b[34;36H" - ], - [ - 0.030958, - "\u001b[53;210H3\u001b[34;37H" - ], - [ - 0.032073, - "\u001b[53;210H4\u001b[34;38H" - ], - [ - 0.029162, - "\u001b[53;210H5\u001b[34;39H" - ], - [ - 0.030591, - "\u001b[53;210H6\u001b[34;40H" - ], - [ - 0.02993, - "\u001b[53;210H7\u001b[34;41H" - ], - [ - 0.032535, - "\u001b[53;210H8\u001b[34;42H" - ], - [ - 0.029006, - "\u001b[53;210H9\u001b[34;43H" - ], - [ - 0.031122, - "\u001b[53;209H40\u001b[34;44H" - ], - [ - 0.027152, - "\u001b[53;210H1\u001b[34;45H" - ], - [ - 0.030614, - "\u001b[53;210H2\u001b[34;46H" - ], - [ - 0.030056, - "\u001b[53;210H3\u001b[34;47H" - ], - [ - 0.031423, - "\u001b[53;210H4\u001b[34;48H" - ], - [ - 0.03265, - "\u001b[53;210H5\u001b[34;49H" - ], - [ - 0.026915, - "\u001b[53;210H6\u001b[34;50H" - ], - [ - 0.032032, - "\u001b[53;210H7\u001b[34;51H" - ], - [ - 0.22945, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[1m\u001b[31m\u001b[106m()\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m8\u001b[34;52H\u001b[?12l\u001b[?25h" - ], - [ - 0.516968, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[34m-- INSERT --\u001b[m\u001b[93m\u001b[107m\u001b[54;13H\u001b[K" - ], - [ - 0.039223, - "\u001b[53;1H\u001b[1m\u001b[38;5;23m\u001b[48;5;231m INSERT \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[m\u001b[93m\u001b[107m\u001b[53;9H\u001b[38;5;231m\u001b[48;5;31m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m\u001b[53;12H kpod-stats \u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;31m\u001b[53;25H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31mserver/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;31mcontainer_list.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[m\u001b[93m\u001b[107m\u001b[53;51H\u001b[38;5;31m\u001b[48;5;24m\u001b[53;52H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;24m                                                                                                                           \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;117m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;149m\u001b[48;5;31m  31%" - ], - [ - 2.7e-05, - "\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;24m\u001b[48;5;117m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 34\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;117m:48 \u001b[34;52H\u001b[?12l\u001b[?25h" - ], - [ - 1.114697, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[31m\u001b[106mj(\u001b[m\u001b[93m\u001b[107m)\u001b[53;50H\u001b[1m\u001b[38;5;220m\u001b[48;5;31m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[53;54H \u001b[m\u001b[93m\u001b[107m\u001b[155C\u001b[38;5;22m\u001b[48;5;117m9\u001b[m\u001b[93m\u001b[107m\u001b[34;52Hj\u001b[1m\u001b[31m\u001b[106m()\b\b\u001b[?12l\u001b[?25h" - ], - [ - 0.616695, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b(\u001b[1m\u001b[31m\u001b[106m)\u001b[m\u001b[93m\u001b[107m\u001b[34;54H\u001b[K\u001b[34;52H\u001b[1m\u001b[31m\u001b[106m(\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[34;52H\u001b[?12l\u001b[?25h" - ], - [ - 0.440354, - "\u001b[?25l\u001b[53;210H9\u001b[34;53H\u001b[?12l\u001b[?25h" - ], - [ - 8.949745, - "\u001b[?25l\u001b[53;210H8\u001b[34;52H\u001b[?12l\u001b[?25h" - ], - [ - 0.496969, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m()\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[34;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.032523, - "\u001b[53;210H6\u001b[34;50H" - ], - [ - 0.025768, - "\u001b[53;210H5\u001b[34;49H" - ], - [ - 0.032765, - "\u001b[53;210H4\u001b[34;48H" - ], - [ - 0.031191, - "\u001b[53;210H3\u001b[34;47H" - ], - [ - 0.030756, - "\u001b[53;210H2\u001b[34;46H" - ], - [ - 0.029399, - "\u001b[53;210H1\u001b[34;45H" - ], - [ - 0.338294, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;149m\u001b[48;5;31m  32%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;24m\u001b[48;5;117m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:1 \u001b[35;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.473917, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;149m\u001b[48;5;31m  31%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;24m\u001b[48;5;117m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:41\u001b[34;45H" - ], - [ - 113.574346, - "\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[1;1H\u001b[96m\u001b[47m 2 \r\n 3 \u001b[m\u001b[93m\u001b[107m\u001b[32mimport\u001b[m\u001b[93m\u001b[107m (\r\n\u001b[96m\u001b[47m 4 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"github.com/kubernetes-incubator/cri-o/oci\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 5 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"github.com/sirupsen/logrus\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 6 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"golang.org/x/net/context\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 7 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"k8s.io/apimachinery/pkg/fields\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 8 \u001b[m\u001b[93m\u001b[107m pb \u001b[36m\"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 9 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 10 \r\n 11 \u001b[m\u001b[93m\u001b[107m\u001b[96m// filterContainer returns whether passed container matches filtering criteria\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 12 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m filterContainer(c *pb.Container, filter *pb.ContainerFilter) \u001b[33mbool\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 13 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 14 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.State != \u001b[36m" - ], - [ - 2.7e-05, - "nil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 15 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c.State != filter.State.State {\r\n\u001b[96m\u001b[47m 16 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 17 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 18 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.LabelSelector != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m\u001b[12Csel := fields.SelectorFromSet(filter.LabelSelector)\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m !sel.Matches(fields.Set(c.Labels)) {\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m 
\u001b[36mtrue\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 28 \r\n 29 \u001b[m\u001b[93m\u001b[107m\u001b[96m// \u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m\u001b[96m lists all containers by filters.\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m" - ], - [ - 0.005724, - "\u001b[47m 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (s *Server) \u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m(ctx context.Context, req *pb.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107mRequest) (*pb.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107mResponse, \u001b[33merror\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 31 \u001b[m\u001b[93m\u001b[107m logrus.Debugf(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m\u001b[36mRequest \u001b[m\u001b[93m\u001b[107m\u001b[31m%+v\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, req)\r\n\u001b[96m\u001b[47m 32 \u001b[m\u001b[93m\u001b[107m \u001b[32mvar\u001b[m\u001b[93m\u001b[107m ctrs []*pb.Container\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m filter := req.Filter\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m ctrList := s.ContainerServer.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m()\r\n\u001b[96m\u001b[47m 35 \r\n 36 \u001b[m\u001b[93m\u001b[107m \u001b[96m// Filter using container id and pod id first.\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 37 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.Id != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[12Cid, err := s.CtrIDIndex().Get(filter.I" - ], - [ - 3.6e-05, - "d)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 41 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m, err\r\n\u001b[96m\u001b[47m 42 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m\u001b[12Cc := s.ContainerServer.GetContainer(id)\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.PodSandboxId != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c.Sandbox() == filter.PodSandboxId {\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m\u001b[24CctrList = []*oci.Container{c}\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[20C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m\u001b[24CctrList = []*oci.Container{}\r\n\u001b[96m\u001b[47m 50 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 51 \u001b[m\u001b[93m\u001b[107m\r\n\u001b[1m\u001b[38;5;23m\u001b[48;5;231m INSERT 
\u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;31m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m\u001b[51;12H kpod-stats \u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;31m" - ], - [ - 1.9e-05, - "\u001b[51;25H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31mserver/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;31mcontainer_list.go\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;31m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[51;54H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;24m                                                                                                                         \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;117m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;149m\u001b[48;5;31m  31%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;24m\u001b[48;5;117m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 34\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:41 \u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[34m-- INSERT --\u001b[33;45H\u001b[?12l\u001b[?2" - ], - [ - 1.4e-05, - "5h" - ], - [ - 0.903658, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[33;38HListCon:tainers()\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[33;46H\u001b[?12l\u001b[?25h" - ], - [ - 0.265128, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mqtainers()\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[33;47H\u001b[?12l\u001b[?25h" - ], - [ - 0.194087, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[33;47H\u001b[K\u001b[34;9Htainers()\u001b[35;9H\u001b[K\u001b[36;9H\u001b[96m// Filter using container id and pod id first.\u001b[m\u001b[93m\u001b[107m\u001b[37;9H\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[37;27H\u001b[K\u001b[38;13H\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.Id != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\u001b[38;33H\u001b[K\u001b[39;17Hid, err := s.CtrIDIndex().Get(filter.Id)\u001b[40;17H\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[40;33H\u001b[K\u001b[41;17H \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m, err\u001b[42;17H}\u001b[42;19H\u001b[K\u001b[43;17Hc := s.ContainerServer.GetContainer(id)\u001b[44;17H\u001b[32mif\u001b[m\u001b[93m\u001b[107m c != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[44;30H\u001b[K\u001b[45;21H\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.PodSandboxId != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\u001b[45;51H\u001b[K\u001b[46;25H\u001b[32mif\u001b[m\u001b[93m\u001b[107m c.Sandbox() == filter.PodSandboxId {\u001b[47;25H ctrList = []*oci.Container{c}\u001b[48;25H} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\u001b[48;33H\u001b[K\u001b[49;25H ctrList = []*oci.Container{}\u001b[50;25H}\u001b[51;195H\u001b[38;5;149m\u001b[48;5;31m  
32%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;24m\u001b[48;5;117m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:5 \u001b[34;9H\u001b[?12l" - ], - [ - 2.1e-05, - "\u001b[?25h" - ], - [ - 0.588871, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K" - ], - [ - 0.015698, - "\u001b[51;1H\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;31m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H kpod-stats \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;25H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mserver/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mcontainer_list.go\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[m\u001b[93m\u001b[107m\u001b[51;53H\u001b[38;5;240m\u001b[48;5;236m\u001b[51;54H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                         \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m" - ], - [ - 4.7e-05, - " go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  32%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 35\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;252m:4  \u001b[34;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.392274, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H1 line less; before #2 2 seconds ago" - ], - [ - 0.004898, - "\u001b[33;38H\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m()\u001b[34;9H\u001b[K\u001b[35;9H\u001b[96m// Filter using container id and pod id first.\u001b[m\u001b[93m\u001b[107m\u001b[36;9H\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[36;27H\u001b[K\u001b[37;9H \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.Id != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\u001b[38;13H id, err := s.CtrIDIndex().Get(filter.Id)\u001b[39;17H\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[39;32H\u001b[K\u001b[40;17H \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m, err\u001b[41;17H}\u001b[41;21H\u001b[K\u001b[42;17Hc := s.ContainerServer.GetContainer(id)\u001b[43;17H\u001b[32mif\u001b[m\u001b[93m\u001b[107m c != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[43;30H\u001b[K\u001b[44;17H \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.PodSandboxId != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\u001b[45;21H \u001b[32mif\u001b[m\u001b[93m\u001b[107m c.Sandbox() == filter.PodSandboxId {\u001b[46;25H ctrList = 
[]*oci.Container{c}\u001b[46;58H\u001b[K\u001b[47;25H} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\u001b[47;33H\u001b[K\u001b[48;25H ctrList = []*oci.Container{}\u001b[49;25H}\u001b[49;29H\u001b[K\u001b[50;25H\u001b[K\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  31%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m" - ], - [ - 3e-05, - "\u001b[48;5;252m:41\u001b[33;45H\u001b[?12l\u001b[?25h" - ], - [ - 0.603268, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1HType :qa! and press to abandon all changes and exit Vim\u001b[?5h\u001b[?12l\u001b[?25h" - ], - [ - 0.008041, - "\u001b[?5l\u001b[33;45H" - ], - [ - 0.319799, - "\u001b[?25l\u001b[52;1H\u001b[K\u001b[52;1H:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.127623, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.201264, - "\r\u001b[?25l\u001b[7m\u001b[31mE37: No write since last change (add ! to override)\u001b[?2004h" - ], - [ - 0.008793, - "\u001b[33;45H\u001b[?12l\u001b[?25h" - ], - [ - 0.638602, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K\u001b[52;1H:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.175644, - "q" - ], - [ - 7.5e-05, - "\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.256365, - "!\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.232366, - "\r" - ], - [ - 0.014901, - "\u001b[?25l\u001b[?2004l\u001b[52;1H\u001b[K\u001b[52;1H\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.00252, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.024247, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats* \u001b[39m \u001b[33m139s\u001b[39m\r\n" - ], - [ - 0.00148, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.00016, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 6.8e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000192, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000106, - "\u001b[?1h\u001b=" - ], - [ - 5e-05, - "\u001b[?2004h" - ], - [ - 0.340214, - "m" - ], - [ - 0.09551, - "\bma" - ], - [ - 0.120447, - "k" - ], - [ - 0.087296, - "e" - ], - [ - 0.071368, - " " - ], - [ - 0.089098, - "k" - ], - [ - 0.1039, - "p" - ], - [ - 0.071636, - "o" - ], - [ - 0.087901, - "d" - ], - [ - 0.136844, - "\u001b[?1l\u001b>" - ], - [ - 0.000113, - "\u001b[?2004l\r\r\n" - ], - [ - 0.012133, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 8.491971, - "go build -ldflags '-X main.gitCommit=1fd05c35 -X main.buildInfo=1502973722' -tags \"selinux seccomp \" -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod\r\n" - ], - [ - 6.307507, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.020575, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats* \u001b[39m \u001b[33m15s\u001b[39m\r\n" - ], - [ - 0.001077, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000118, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.6e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 9.3e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.5e-05, - "\u001b[?1h\u001b=" - ], - [ - 3e-05, - "\u001b[?2004h" - ], - [ - 
146.677024, - "c" - ], - [ - 0.396395, - "\b \b" - ], - [ - 0.096947, - "g" - ], - [ - 0.119491, - "\bgi" - ], - [ - 0.065073, - "t" - ], - [ - 0.119305, - " " - ], - [ - 0.064229, - "s" - ], - [ - 0.367919, - "t" - ], - [ - 0.167391, - "a" - ], - [ - 0.088279, - "t" - ], - [ - 0.136684, - "u" - ], - [ - 0.111874, - "s" - ], - [ - 0.167915, - "\u001b[?1l\u001b>" - ], - [ - 0.000214, - "\u001b[?2004l\r\r\n" - ], - [ - 0.003677, - "\u001b]2;git status\u0007\u001b]1;git\u0007" - ], - [ - 0.018619, - "On branch kpod-stats\r\n" - ], - [ - 3.2e-05, - "Your branch is up-to-date with 'origin/kpod-stats'.\r\nChanges not staged for commit:\r\n (use \"git add ...\" to update what will be committed)\r\n (use \"git checkout -- ...\" to discard changes in working directory)\r\n\r\n\t\u001b[31mmodified: libkpod/container_server.go\u001b[m\r\n" - ], - [ - 1.3e-05, - "\t\u001b[31mmodified: server/container_list.go\u001b[m\r\n\r\nno changes added to commit (use \"git add\" and/or \"git commit -a\")\r\n" - ], - [ - 0.000564, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.024004, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.003289, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000146, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000144, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000112, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000113, - "\u001b[?1h\u001b=" - ], - [ - 4.6e-05, - "\u001b[?2004h" - ], - [ - 0.157568, - "g" - ], - [ - 0.150725, - "\bgi" - ], - [ - 0.088758, - "t" - ], - [ - 0.191523, - " " - ], - [ - 0.703489, - "\b" - ], - [ - 0.499958, - "\b \b" - ], - [ - 0.031738, - "\b\bg \b" - ], - [ - 0.030624, - "\b \b" - ], - [ - 0.302685, - "m" - ], - [ - 0.143773, - "\bma" - ], - [ - 0.132383, - "k" - ], - [ - 0.13996, - "e" - ], - [ - 0.031734, - " " - ], - [ - 0.100103, - "k" - ], - [ - 0.13186, - "p" - ], - [ - 0.09625, - "o" - ], - [ - 0.135131, - "d" - ], - [ - 1.224544, - "\u001b[?1l\u001b>" - ], - [ - 0.0002, - "\u001b[?2004l" - ], - [ - 0.000282, - "\r\r\n" - ], - [ - 0.003861, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 7.767101, - "make: 'kpod' is up to date.\r\n" - ], - [ - 0.000406, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.024134, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats* \u001b[39m \u001b[33m8s\u001b[39m\r\n" - ], - [ - 0.001383, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000122, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.6e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.00011, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.5e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.4e-05, - "\u001b[?2004h" - ], - [ - 11.517218, - " " - ], - [ - 4.345309, - "\b\b\b\b\b\b" - ], - [ - 0.359649, - "s" - ], - [ - 0.087548, - "\bsu" - ], - [ - 0.097684, - "d" - ], - [ - 0.086354, - "o" - ], - [ - 0.096813, - " " - ], - [ - 0.09598, - "m" - ], - [ - 0.095724, - "a" - ], - [ - 0.112142, - "k" - ], - [ - 0.095568, - "e" - ], - [ - 0.079489, - " " - ], - [ - 0.121036, - "c" - ], - [ - 
0.06424, - "l" - ], - [ - 0.160013, - "e" - ], - [ - 0.078814, - "a" - ], - [ - 0.088993, - "n" - ], - [ - 0.198984, - "\u001b[?1l\u001b>" - ], - [ - 4.7e-05, - "\u001b[?2004l\r\r\n" - ], - [ - 0.002964, - "\u001b]2;sudo make clean\u0007\u001b]1;make\u0007" - ], - [ - 0.970251, - "[sudo] password for ryan: " - ], - [ - 1.949557, - "\r\n" - ], - [ - 3.091473, - "rm -f \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/_output/.gopathok\"\r\n" - ], - [ - 0.001076, - "rm -rf _output\r\n" - ], - [ - 0.000525, - "rm -f docs/*.1 docs/*.5 docs/*.8\r\n" - ], - [ - 0.001472, - "rm -fr test/testdata/redis-image\r\n" - ], - [ - 0.000452, - "find . -name \\*~ -delete\r\n" - ], - [ - 0.015501, - "find . -name \\#\\* -delete\r\n" - ], - [ - 0.01696, - "rm -f crioctl crio kpod\r\n" - ], - [ - 0.025533, - "make -C conmon clean\r\n" - ], - [ - 0.002621, - "make[1]: Entering directory '/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/conmon'\r\nrm -f conmon.o cmsg.o conmon\r\n" - ], - [ - 0.001734, - "make[1]: Leaving directory '/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/conmon'\r\nmake -C pause clean\r\n" - ], - [ - 0.003556, - "make[1]: Entering directory '/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/pause'\r\nrm -f pause.o pause\r\n" - ], - [ - 0.000983, - "make[1]: Leaving directory '/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/pause'\r\n" - ], - [ - 0.000224, - "rm -f test/bin2img/bin2img\r\n" - ], - [ - 0.002959, - "rm -f test/copyimg/copyimg\r\n" - ], - [ - 0.004744, - "rm -f test/checkseccomp/checkseccomp\r\n" - ], - [ - 0.002614, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.024046, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats* \u001b[39m \u001b[33m6s\u001b[39m\r\n" - ], - [ - 0.00245, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000119, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.6e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 8e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m " - ], - [ - 2.4e-05, - "\u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 4.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.6e-05, - "\u001b[?2004h" - ], - [ - 2.73537, - "g" - ], - [ - 0.063396, - "\bgi" - ], - [ - 0.128086, - "t" - ], - [ - 0.095837, - " " - ], - [ - 0.144554, - "c" - ], - [ - 0.083684, - "o" - ], - [ - 0.083361, - "m" - ], - [ - 0.165205, - "m" - ], - [ - 0.159495, - "i" - ], - [ - 0.108285, - "t" - ], - [ - 0.098609, - " " - ], - [ - 0.145308, - "-" - ], - [ - 0.099243, - "a" - ], - [ - 0.10427, - " " - ], - [ - 0.117066, - "-" - ], - [ - 0.131033, - "-" - ], - [ - 0.095675, - "a" - ], - [ - 0.112554, - "m" - ], - [ - 0.075379, - "e" - ], - [ - 0.108006, - "n" - ], - [ - 0.111673, - "d" - ], - [ - 0.121036, - "\u001b[?1l\u001b>" - ], - [ - 0.000282, - "\u001b[?2004l\r\r\n" - ], - [ - 0.006629, - "\u001b]2;git commit -a --amend\u0007\u001b]1;git\u0007" - ], - [ - 0.033575, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000393, - "\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"~/Development/Go/src/github.com/kubernetes-incubator/cri-o/.git/COMMIT_EDITMSG\"" - ], - [ - 8.2e-05, - " 107L, 6228C" - ], - [ - 0.000173, - "\u001b[1;1Hadd kpod stats function\r\n\r\nSigned-off-by: Ryan 
Cole \r\n\r\n# Please enter the commit message for your changes. Lines starting\r\n# with '#' will be ignored, and an empty message aborts the commit.\r\n#\r\n# Date: Tue Jul 25 09:56:23 2017 -0400\r\n#\r\n# On branch kpod-stats\r\n# Your branch is up-to-date with 'origin/kpod-stats'.\r\n#\r\n# Changes to be committed:\r\n#\u001b[7Cmodified: README.md\r\n#\u001b[7Cmodified: cmd/kpod/images.go\r\n#\u001b[7Cmodified: cmd/kpod/main.go\r\n#\u001b[7Cnew file: cmd/kpod/stats.go\r\n#\u001b[7Cmodified: completions/bash/kpod\r\n#\u001b[7Cnew file: docs/kpod-stats.1.md\r\n#\u001b[7Cmodified: libkpod/container_server.go\r\n#\u001b[7Cmodified: libkpod/image/image.go\r\n#\u001b[7Cnew file: libkpod/stats.go\r\n#\u001b[7Cmodified: server/container_list.go\r\n#\u001b[7Cnew file: test/kpod_stats.bats\r\n#\u001b[7Cmodified: vendor.conf\r\n#\u001b[7Cdeleted: vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go\r\n#\u001b[7Cnew file: vendor/github.com/buger/goterm/README.md\r\n#\u001b[7Cnew file: vendor/github.com/buger/goterm/box.go\r\n#\u001b[7C" - ], - [ - 1.9e-05, - "new file: vendor/github.com/buger/goterm/plot.go\r\n#\u001b[7Cnew file: vendor/github.com/buger/goterm/table.go\r\n#\u001b[7Cnew file: vendor/github.com/buger/goterm/terminal.go\r\n#\u001b[7Cnew file: vendor/github.com/buger/goterm/terminal_nosysioctl.go\r\n#\u001b[7Cnew file: vendor/github.com/buger/goterm/terminal_sysioctl.go\r\n#\u001b[7Cdeleted: vendor/github.com/containers/storage/pkg/archive/example_changes.go\r\n#\u001b[7Cnew file: vendor/github.com/mrunalp/fileutils/LICENSE\r\n#\u001b[7Cnew file: vendor/github.com/mrunalp/fileutils/README.md\r\n#\u001b[7Cnew file: vendor/github.com/mrunalp/fileutils/fileutils.go\r\n#\u001b[7Cnew file: vendor/github.com/mrunalp/fileutils/idtools.go\r\n#\u001b[7Cmodified: vendor/github.com/opencontainers/runc/libcontainer/container_linux.go\r\n#\u001b[7Cmodified: vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.pb.go\r\n#\u001b[7Cmodified: vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.proto\r\n#\u001b[7Cmodified: vendor/github.com/opencontainers/runc/libcontainer/init_linux.go\r\n#" - ], - [ - 1.7e-05, - "\u001b[7Cmodified: vendor/github.com/opencontainers/runc/libcontainer/state_linux.go\r\n#\u001b[7Cnew file: vendor/github.com/vishvananda/netlink/LICENSE\r\n#\u001b[7Cnew file: vendor/github.com/vishvananda/netlink/README.md\r\n#\u001b[7Cnew file: vendor/github.com/vishvananda/netlink/addr.go\r\n#\u001b[7Cnew file: vendor/github.com/vishvananda/netlink/addr_linux.go\r\n#\u001b[7Cnew file: vendor/github.com/vishvananda/netlink/bpf_linux.go\r\n#\u001b[7Cnew file: vendor/github.com/vishvananda/netlink/bridge_linux.go\r\n#\u001b[7Cnew file: vendor/github.com/vishvananda/netlink/class.go\r\n#\u001b[7Cnew file: vendor/github.com/vishvananda/netlink/class_linux.go\u001b[1;1H\u001b[?12l\u001b[?25h" - ], - [ - 0.318498, - "\u001b[?25l\u001b[52;1H\u001b[K\u001b[52;1H:\u001b[?2004h" - ], - [ - 0.000184, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.194901, - "w" - ], - [ - 0.048866, - "q" - ], - [ - 0.079626, - "\r" - ], - [ - 6.7e-05, - "\u001b[?25l\u001b[?2004l" - ], - [ - 0.000842, - "\".git/COMMIT_EDITMSG\"" - ], - [ - 0.018643, - " 107L, 6228C written" - ], - [ - 0.000239, - "\r\r\r\n\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.012196, - "[kpod-stats 51fe5a83] add kpod stats function\r\n Date: Tue Jul 25 09:56:23 2017 -0400\r\n" - ], - [ - 0.005388, - " 93 files changed, 15781 insertions(+), 1249 deletions(-)\r\n create mode 100644 
cmd/kpod/stats.go\r\n create mode 100644 docs/kpod-stats.1.md\r\n create mode 100644 libkpod/stats.go\r\n create mode 100644 test/kpod_stats.bats\r\n delete mode 100644 vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go\r\n create mode 100644 vendor/github.com/buger/goterm/README.md\r\n create mode 100644 vendor/github.com/buger/goterm/box.go\r\n create mode 100644 vendor/github.com/buger/goterm/plot.go\r\n create mode 100644 vendor/github.com/buger/goterm/table.go\r\n create mode 100644 vendor/github.com/buger/goterm/terminal.go\r\n create mode 100644 vendor/github.com/buger/goterm/terminal_nosysioctl.go\r\n create mode 100644 vendor/github.com/buger/goterm/terminal_sysioctl.go\r\n delete mode 100644 vendor/github.com/containers/storage/pkg/archive/example_changes.go\r\n create mode 100644 vendor/github.com/mrunalp/fileutils/LICENSE\r\n create mode 100644 vendor/github.com/mrunalp/fileutils/README.md\r\n create mode 100644 vendor/github.com/mrunalp/fileu" - ], - [ - 3.1e-05, - "tils/fileutils.go\r\n create mode 100644 vendor/github.com/mrunalp/fileutils/idtools.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/LICENSE\r\n create mode 100644 vendor/github.com/vishvananda/netlink/README.md\r\n create mode 100644 vendor/github.com/vishvananda/netlink/addr.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/addr_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/bpf_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/bridge_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/class.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/class_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/conntrack_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/conntrack_unspecified.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/filter.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/filter_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/genetlin" - ], - [ - 1.9e-05, - "k_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/genetlink_unspecified.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/gtp_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/handle_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/handle_unspecified.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/link.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/link_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/link_tuntap_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/neigh.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/neigh_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/netlink.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/netlink_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/netlink_unspecified.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/addr_linux.go\r\n create mode 100644 vendor/github." 
- ], - [ - 1.7e-05, - "com/vishvananda/netlink/nl/bridge_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/link_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/mpls_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/nl_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/route_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/syscall.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/tc_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go\r\n" - ], - [ - 1.6e-05, - " create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/order.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/protinfo.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/protinfo_linux.go\r\n" - ], - [ - 1.9e-05, - " create mode 100644 vendor/github.com/vishvananda/netlink/qdisc.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/qdisc_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/route.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/route_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/route_unspecified.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/rule.go\r\n" - ], - [ - 1.5e-05, - " create mode 100644 vendor/github.com/vishvananda/netlink/rule_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/socket.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/socket_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/xfrm.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_policy.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go\r\n" - ], - [ - 1.3e-05, - " create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_state.go\r\n create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_state_linux.go\r\n create mode 100644 vendor/github.com/vishvananda/netns/LICENSE\r\n create mode 100644 vendor/github.com/vishvananda/netns/README.md\r\n create mode 100644 vendor/github.com/vishvananda/netns/netns.go\r\n create mode 100644 vendor/github.com/vishvananda/netns/netns_linux.go" - ], - [ - 1.3e-05, - "\r\n create mode 100644 vendor/github.com/vishvananda/netns/netns_unspecified.go\r\n" - ], - [ - 0.00046, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.023729, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001064, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 7.6e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.9e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 7.5e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m 
\u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.4e-05, - "\u001b[?1h\u001b=" - ], - [ - 1.7e-05, - "\u001b[?2004h" - ], - [ - 0.093363, - "g" - ], - [ - 0.116444, - "\bgi" - ], - [ - 0.09112, - "t" - ], - [ - 0.140489, - " " - ], - [ - 0.295456, - "\b" - ], - [ - 0.148558, - "\b \b" - ], - [ - 0.151794, - "\b\bg \b" - ], - [ - 0.143687, - "\b \b" - ], - [ - 0.1717, - "m" - ], - [ - 0.113167, - "\bma" - ], - [ - 0.071539, - "k" - ], - [ - 0.151864, - "e" - ], - [ - 0.068538, - " " - ], - [ - 0.051546, - "k" - ], - [ - 0.136138, - "p" - ], - [ - 0.087532, - "o" - ], - [ - 0.136608, - "d" - ], - [ - 0.107472, - "\u001b[?1l\u001b>" - ], - [ - 0.000279, - "\u001b[?2004l\r\r\n" - ], - [ - 0.003576, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 7.566279, - "go build -ldflags '-X main.gitCommit=51fe5a83 -X main.buildInfo=1502973930' -tags \"selinux seccomp \" -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod\r\n" - ], - [ - 5.528396, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.025808, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats \u001b[39m \u001b[33m13s\u001b[39m\r\n" - ], - [ - 0.002211, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 64.301943, - "g" - ], - [ - 0.072172, - "\bgi" - ], - [ - 0.104359, - "t" - ], - [ - 0.079837, - " " - ], - [ - 0.080135, - "p" - ], - [ - 0.055427, - "u" - ], - [ - 0.119911, - "s" - ], - [ - 0.104008, - "h" - ], - [ - 0.111595, - " " - ], - [ - 0.112396, - "-" - ], - [ - 0.112504, - "f" - ], - [ - 0.143522, - " " - ], - [ - 0.191507, - "o" - ], - [ - 0.120131, - "r" - ], - [ - 0.096379, - "i" - ], - [ - 0.127918, - "g" - ], - [ - 0.063437, - "i" - ], - [ - 0.056858, - "n" - ], - [ - 0.104321, - " " - ], - [ - 0.095939, - "k" - ], - [ - 0.192254, - "pod-" - ], - [ - 0.303579, - "s" - ], - [ - 0.088135, - "t" - ], - [ - 0.179185, - "a" - ], - [ - 0.580721, - "t" - ], - [ - 0.182035, - "s\u001b[1m \u001b[0m" - ], - [ - 0.393146, - "\b\u001b[0m \b" - ], - [ - 0.000296, - "\u001b[?1l\u001b>\u001b[?2004l\r\r\n" - ], - [ - 0.003558, - "\u001b]2;git push -f origin kpod-stats\u0007\u001b]1;git\u0007" - ], - [ - 0.735535, - "Counting objects: 120, done.\r\n" - ], - [ - 0.001739, - "Delta compression using up to 4 threads.\r\n" - ], - [ - 0.000228, - "Compressing objects: 0% (1/105) \r" - ], - [ - 0.000124, - "Compressing objects: 1% (2/105) \rCompressing objects: 2% (3/105) \r" - ], - [ - 4.3e-05, - "Compressing objects: 3% (4/105) \rCompressing objects: 4% (5/105) \rCompressing objects: 5% (6/105) \rCompressing objects: 6% (7/105) \r" - ], - [ - 2e-05, - "Compressing objects: 7% (8/105) \rCompressing objects: 8% (9/105) \rCompressing objects: 9% (10/105) \r" - ], - [ - 4.8e-05, - "Compressing objects: 10% (11/105) \rCompressing objects: 11% (12/105) \rCompressing objects: 12% (13/105) \r" - ], - [ - 0.000331, - "Compressing objects: 13% (14/105) \r" - ], - [ - 4.3e-05, - "Compressing objects: 14% (15/105) \r" - ], - [ - 4e-05, - "Compressing objects: 15% (16/105) \rCompressing objects: 16% (17/105) \r" - ], - [ - 1.5e-05, - "Compressing objects: 17% (18/105) \r" - ], - [ - 5.3e-05, - "Compressing objects: 18% (19/105) \rCompressing objects: 19% (20/105) \r" 
- ], - [ - 2e-05, - "Compressing objects: 20% (21/105) \r" - ], - [ - 0.000375, - "Compressing objects: 21% (23/105) \r" - ], - [ - 2e-05, - "Compressing objects: 22% (24/105) \r" - ], - [ - 4.8e-05, - "Compressing objects: 23% (25/105) \r" - ], - [ - 3.7e-05, - "Compressing objects: 24% (26/105) \r" - ], - [ - 3.7e-05, - "Compressing objects: 25% (27/105) \r" - ], - [ - 7.7e-05, - "Compressing objects: 26% (28/105) \r" - ], - [ - 1.7e-05, - "Compressing objects: 27% (29/105) \rCompressing objects: 28% (30/105) \r" - ], - [ - 3.7e-05, - "Compressing objects: 29% (31/105) \r" - ], - [ - 7.1e-05, - "Compressing objects: 30% (32/105) \rCompressing objects: 31% (33/105) \rCompressing objects: 32% (34/105) \r" - ], - [ - 4.1e-05, - "Compressing objects: 33% (35/105) \rCompressing objects: 34% (36/105) \r" - ], - [ - 6.3e-05, - "Compressing objects: 35% (37/105) \r" - ], - [ - 0.00012, - "Compressing objects: 36% (38/105) \r" - ], - [ - 0.000158, - "Compressing objects: 37% (39/105) \r" - ], - [ - 1.2e-05, - "Compressing objects: 38% (40/105) \r" - ], - [ - 0.000264, - "Compressing objects: 39% (41/105) \r" - ], - [ - 3.5e-05, - "Compressing objects: 40% (42/105) \r" - ], - [ - 0.000137, - "Compressing objects: 41% (44/105) \r" - ], - [ - 8.9e-05, - "Compressing objects: 42% (45/105) \r" - ], - [ - 8.5e-05, - "Compressing objects: 43% (46/105) \r" - ], - [ - 1e-05, - "Compressing objects: 44% (47/105) \r" - ], - [ - 6.8e-05, - "Compressing objects: 45% (48/105) \r" - ], - [ - 1.4e-05, - "Compressing objects: 46% (49/105) \r" - ], - [ - 0.000117, - "Compressing objects: 47% (50/105) \r" - ], - [ - 3.1e-05, - "Compressing objects: 48% (51/105) \r" - ], - [ - 3.1e-05, - "Compressing objects: 49% (52/105) \rCompressing objects: 50% (53/105) \r" - ], - [ - 4e-05, - "Compressing objects: 51% (54/105) \r" - ], - [ - 6.3e-05, - "Compressing objects: 52% (55/105) \r" - ], - [ - 9.2e-05, - "Compressing objects: 53% (56/105) \r" - ], - [ - 4.3e-05, - "Compressing objects: 54% (57/105) \r" - ], - [ - 7.9e-05, - "Compressing objects: 55% (58/105) \r" - ], - [ - 4.1e-05, - "Compressing objects: 56% (59/105) \r" - ], - [ - 6.3e-05, - "Compressing objects: 57% (60/105) \r" - ], - [ - 5e-05, - "Compressing objects: 58% (61/105) \r" - ], - [ - 0.000414, - "Compressing objects: 59% (62/105) \r" - ], - [ - 7.8e-05, - "Compressing objects: 60% (63/105) \r" - ], - [ - 5.6e-05, - "Compressing objects: 61% (65/105) \r" - ], - [ - 0.000113, - "Compressing objects: 62% (66/105) \r" - ], - [ - 9.4e-05, - "Compressing objects: 63% (67/105) \r" - ], - [ - 0.000101, - "Compressing objects: 64% (68/105) \r" - ], - [ - 8.3e-05, - "Compressing objects: 65% (69/105) \r" - ], - [ - 3.4e-05, - "Compressing objects: 66% (70/105) \r" - ], - [ - 9.2e-05, - "Compressing objects: 67% (71/105) \r" - ], - [ - 0.000266, - "Compressing objects: 68% (72/105) \r" - ], - [ - 2.2e-05, - "Compressing objects: 69% (73/105) \rCompressing objects: 70% (74/105) \rCompressing objects: 71% (75/105) \r" - ], - [ - 4.2e-05, - "Compressing objects: 72% (76/105) \r" - ], - [ - 3e-05, - "Compressing objects: 73% (77/105) \rCompressing objects: 74% (78/105) \r" - ], - [ - 0.000171, - "Compressing objects: 75% (79/105) \r" - ], - [ - 6.2e-05, - "Compressing objects: 76% (80/105) \r" - ], - [ - 2.4e-05, - "Compressing objects: 77% (81/105) \r" - ], - [ - 6.7e-05, - "Compressing objects: 78% (82/105) \rCompressing objects: 79% (83/105) \r" - ], - [ - 0.000233, - "Compressing objects: 80% (84/105) \r" - ], - [ - 0.00016, - "Compressing objects: 81% (86/105) 
\r" - ], - [ - 0.000135, - "Compressing objects: 82% (87/105) \r" - ], - [ - 0.000101, - "Compressing objects: 83% (88/105) \rCompressing objects: 84% (89/105) \r" - ], - [ - 2.1e-05, - "Compressing objects: 85% (90/105) \r" - ], - [ - 1.3e-05, - "Compressing objects: 86% (91/105) \r" - ], - [ - 0.000128, - "Compressing objects: 87% (92/105) \r" - ], - [ - 3.5e-05, - "Compressing objects: 88% (93/105) \r" - ], - [ - 0.000118, - "Compressing objects: 89% (94/105) \r" - ], - [ - 5.7e-05, - "Compressing objects: 90% (95/105) \rCompressing objects: 91% (96/105) \r" - ], - [ - 0.000429, - "Compressing objects: 92% (97/105) \rCompressing objects: 93% (98/105) \rCompressing objects: 94% (99/105) \r" - ], - [ - 1.5e-05, - "Compressing objects: 95% (100/105) \rCompressing objects: 96% (101/105) \rCompressing objects: 97% (102/105) \r" - ], - [ - 0.000944, - "Compressing objects: 98% (103/105) \r" - ], - [ - 0.000609, - "Compressing objects: 99% (104/105) \r" - ], - [ - 0.000117, - "Compressing objects: 100% (105/105) \r" - ], - [ - 7.5e-05, - "Compressing objects: 100% (105/105), done.\r\n" - ], - [ - 0.000147, - "Writing objects: 0% (1/120) \r" - ], - [ - 4.5e-05, - "Writing objects: 1% (2/120) \r" - ], - [ - 2.2e-05, - "Writing objects: 2% (3/120) \r" - ], - [ - 1.6e-05, - "Writing objects: 3% (4/120) \r" - ], - [ - 2.2e-05, - "Writing objects: 4% (5/120) \r" - ], - [ - 1.2e-05, - "Writing objects: 5% (6/120) \r" - ], - [ - 0.000344, - "Writing objects: 6% (8/120) \r" - ], - [ - 0.000106, - "Writing objects: 7% (9/120) \r" - ], - [ - 3.4e-05, - "Writing objects: 8% (10/120) \r" - ], - [ - 3.1e-05, - "Writing objects: 9% (11/120) \r" - ], - [ - 2.3e-05, - "Writing objects: 10% (12/120) \r" - ], - [ - 3.4e-05, - "Writing objects: 11% (14/120) \r" - ], - [ - 0.000662, - "Writing objects: 12% (15/120) \r" - ], - [ - 3.1e-05, - "Writing objects: 13% (16/120) \rWriting objects: 14% (17/120) \r" - ], - [ - 0.000202, - "Writing objects: 15% (18/120) \r" - ], - [ - 4.8e-05, - "Writing objects: 16% (20/120) \r" - ], - [ - 1.7e-05, - "Writing objects: 17% (21/120) \rWriting objects: 18% (22/120) \r" - ], - [ - 2.2e-05, - "Writing objects: 19% (23/120) \r" - ], - [ - 1.1e-05, - "Writing objects: 20% (24/120) \r" - ], - [ - 4.3e-05, - "Writing objects: 21% (26/120) \rWriting objects: 22% (27/120) \r" - ], - [ - 1.1e-05, - "Writing objects: 23% (28/120) \rWriting objects: 24% (29/120) \r" - ], - [ - 7.8e-05, - "Writing objects: 25% (30/120) \r" - ], - [ - 2.3e-05, - "Writing objects: 26% (32/120) \rWriting objects: 27% (33/120) \rWriting objects: 28% (34/120) \rWriting objects: 29% (35/120) \rWriting objects: 30% (36/120) \rWriting objects: 31% (38/120) \rWriting objects: 32% (39/120) \rWriting objects: 33% (40/120) \rWriting objects: 34% (41/120) \r" - ], - [ - 3.9e-05, - "Writing objects: 35% (42/120) \r" - ], - [ - 7.2e-05, - "Writing objects: 36% (44/120) \rWriting objects: 37% (45/120) \r" - ], - [ - 1.9e-05, - "Writing objects: 38% (46/120) \r" - ], - [ - 1.8e-05, - "Writing objects: 39% (47/120) \r" - ], - [ - 2.5e-05, - "Writing objects: 40% (48/120) \rWriting objects: 41% (50/120) \rWriting objects: 42% (51/120) \rWriting objects: 43% (52/120) \r" - ], - [ - 1e-05, - "Writing objects: 44% (53/120) \rWriting objects: 45% (54/120) \r" - ], - [ - 7.1e-05, - "Writing objects: 46% (56/120) \r" - ], - [ - 0.000534, - "Writing objects: 47% (57/120) \r" - ], - [ - 9.6e-05, - "Writing objects: 48% (58/120) \r" - ], - [ - 0.000193, - "Writing objects: 49% (59/120) \rWriting objects: 50% (60/120) \rWriting 
objects: 51% (62/120) \r" - ], - [ - 1.3e-05, - "Writing objects: 52% (63/120) \rWriting objects: 53% (64/120) \r" - ], - [ - 6e-05, - "Writing objects: 54% (65/120) \rWriting objects: 55% (66/120) \rWriting objects: 56% (68/120) \r" - ], - [ - 7e-05, - "Writing objects: 57% (69/120) \rWriting objects: 58% (70/120) \rWriting objects: 59% (71/120) \rWriting objects: 60% (72/120) \r" - ], - [ - 0.000191, - "Writing objects: 61% (74/120) \r" - ], - [ - 6.2e-05, - "Writing objects: 62% (75/120) \rWriting objects: 63% (76/120) \rWriting objects: 64% (77/120) \r" - ], - [ - 0.00023, - "Writing objects: 65% (78/120) \r" - ], - [ - 8.5e-05, - "Writing objects: 66% (80/120) \rWriting objects: 67% (81/120) \rWriting objects: 68% (82/120) \r" - ], - [ - 0.000103, - "Writing objects: 69% (83/120) \rWriting objects: 70% (84/120) \r" - ], - [ - 0.000194, - "Writing objects: 71% (86/120) \rWriting objects: 72% (87/120) \rWriting objects: 73% (88/120) \r" - ], - [ - 0.000514, - "Writing objects: 74% (89/120) \rWriting objects: 75% (90/120) \r" - ], - [ - 2.9e-05, - "Writing objects: 76% (92/120) \r" - ], - [ - 0.000844, - "Writing objects: 77% (93/120) \r" - ], - [ - 3.5e-05, - "Writing objects: 78% (94/120) \rWriting objects: 79% (95/120) \rWriting objects: 80% (96/120) \r" - ], - [ - 9.9e-05, - "Writing objects: 81% (98/120) \r" - ], - [ - 0.000116, - "Writing objects: 82% (99/120) \r" - ], - [ - 4.6e-05, - "Writing objects: 83% (100/120) \r" - ], - [ - 0.000493, - "Writing objects: 84% (101/120) \rWriting objects: 85% (102/120) \rWriting objects: 86% (104/120) \rWriting objects: 87% (105/120) \rWriting objects: 88% (106/120) \rWriting objects: 89% (107/120) \rWriting objects: 90% (108/120) \r" - ], - [ - 0.020626, - "Writing objects: 91% (110/120) \rWriting objects: 92% (111/120) \rWriting objects: 93% (112/120) \rWriting objects: 94% (113/120) \rWriting objects: 95% (114/120) \rWriting objects: 96% (116/120) \rWriting objects: 97% (117/120) \rWriting objects: 98% (118/120) \rWriting objects: 99% (119/120) \rWriting objects: 100% (120/120) \r" - ], - [ - 0.000125, - "Writing objects: 100% (120/120), 116.02 KiB | 4.30 MiB/s, done.\r\nTotal 120 (delta 31), reused 78 (delta 11)\r\n" - ], - [ - 0.145058, - "remote: Resolving deltas: 0% (0/31) \u001b[K\r" - ], - [ - 0.038865, - "remote: Resolving deltas: 3% (1/31) \u001b[K\rremote: Resolving deltas: 6% (2/31) \u001b[K\rremote: Resolving deltas: 9% (3/31) \u001b[K\r" - ], - [ - 0.000148, - "remote: Resolving deltas: 12% (4/31) \u001b[K\rremote: Resolving deltas: 16% (5/31) \u001b[K\r" - ], - [ - 4.8e-05, - "remote: Resolving deltas: 19% (6/31) \u001b[K\rremote: Resolving deltas: 22% (7/31) \u001b[K\rremote: Resolving deltas: 25% (8/31) \u001b[K\r" - ], - [ - 9.1e-05, - "remote: Resolving deltas: 29% (9/31) \u001b[K\rremote: Resolving deltas: 32% (10/31) \u001b[K\rremote: Resolving deltas: 35% (11/31) \u001b[K\rremote: Resolving deltas: 38% (12/31) \u001b[K\rremote: Resolving deltas: 41% (13/31) \u001b[K\rremote: Resolving deltas: 45% (14/31) \u001b[K\r" - ], - [ - 3.7e-05, - "remote: Resolving deltas: 48% (15/31) \u001b[K\rremote: Resolving deltas: 51% (16/31) \u001b[K\rremote: Resolving deltas: 54% (17/31) \u001b[K\rremote: Resolving deltas: 58% (18/31) \u001b[K\rremote: Resolving deltas: 61% (19/31) \u001b[K\rremote: Resolving deltas: 64% (20/31) \u001b[K\rremote: Resolving deltas: 67% (21/31) \u001b[K\r" - ], - [ - 0.000346, - "remote: Resolving deltas: 70% (22/31) \u001b[K\rremote: Resolving deltas: 74% (23/31) \u001b[K\rremote: Resolving deltas: 77% 
(24/31) \u001b[K\rremote: Resolving deltas: 80% (25/31) \u001b[K\rremote: Resolving deltas: 83% (26/31) \u001b[K\rremote: Resolving deltas: 87% (27/31) \u001b[K\rremote: Resolving deltas: 90% (28/31) \u001b[K\rremote: Resolving deltas: 93% (29/31) \u001b[K\rremote: Resolving deltas: 96% (30/31) \u001b[K\rremote: Resolving deltas: 100% (31/31) \u001b[K\rremote: Resolving deltas: 100% (31/31), completed with 30 local objects.\u001b[K\r\n" - ], - [ - 1.631427, - "To github.com:14rcole/cri-o\r\n + 1fd05c35...51fe5a83 kpod-stats -> kpod-stats (forced update)\r\n" - ], - [ - 0.001344, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.027627, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001274, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 9.1e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.9e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000387, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D\u001b[?1h\u001b=" - ], - [ - 2.1e-05, - "\u001b[?2004h" - ], - [ - 291.306849, - "s" - ], - [ - 0.143831, - "\bsu" - ], - [ - 0.09576, - "d" - ], - [ - 0.079837, - "o" - ], - [ - 0.056313, - " " - ], - [ - 0.09608, - "d" - ], - [ - 0.071913, - "o" - ], - [ - 0.175697, - "k" - ], - [ - 0.11164, - "c" - ], - [ - 0.344681, - "\b \b" - ], - [ - 0.143002, - "\b \b" - ], - [ - 0.353439, - "c" - ], - [ - 0.087928, - "k" - ], - [ - 0.127562, - "e" - ], - [ - 0.064111, - "r" - ], - [ - 0.095364, - " " - ], - [ - 0.103965, - "p" - ], - [ - 0.153227, - "s" - ], - [ - 0.119176, - "\u001b[?1l\u001b>" - ], - [ - 0.000192, - "\u001b[?2004l\r\r\n" - ], - [ - 0.004251, - "\u001b]2;sudo docker ps\u0007\u001b]1;docker\u0007" - ], - [ - 0.951818, - "[sudo] password for ryan: " - ], - [ - 2.123737, - "\r\n" - ], - [ - 0.050254, - "CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\r\n" - ], - [ - 0.00261, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.027572, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.000953, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000219, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.6e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 6.4e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.4e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.9e-05, - "\u001b[?2004h" - ], - [ - 1.758608, - "s" - ], - [ - 0.079496, - "\bsu" - ], - [ - 0.160361, - "d" - ], - [ - 0.111675, - "o" - ], - [ - 0.576293, - "\b \b" - ], - [ - 0.159908, - "\b \b" - ], - [ - 0.168126, - "\b\bs \b" - ], - [ - 0.159654, - "\b \b" - ], - [ - 0.11212, - "s" - ], - [ - 0.127338, - "\bsu" - ], - [ - 0.080917, - "d" - ], - [ - 0.215883, - " " - ], - [ - 0.103841, - "o" - ], - [ - 0.256261, - "\b \b" - ], - [ - 0.136045, - "\b" - ], - [ - 0.191849, - "o" - ], - [ - 0.143304, - " " - ], - [ - 0.0082, - "d" - ], - [ - 0.096655, - "o" - ], - [ - 0.127223, - "c" - ], - [ - 0.08778, - "k" - ], - [ - 0.072448, - "e" - ], - [ - 0.064418, - "r" - ], - [ - 0.08791, - " " - ], - [ - 0.10463, - "r" - ], - [ - 0.111928, - "u" - ], - [ - 
- [asciinema recording data elided: hundreds of `[delay, "text"]` frames of raw terminal escape sequences from the removed demo cast file. The recoverable session: `sudo docker run -d redis:alpine` (prints the new container ID), `sudo docker ps` and `sudo docker ps --no-trunc` (both list the running redis:alpine container), `git checkout kpod-test-refactor`, then `vi test/kpod_push.bats`, paging through the kpod push test cases: "kpod push to containers/storage", "kpod push to directory", "kpod push to docker archive", "kpod push to oci without compression", and "kpod push without signatures".]
\u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[33;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[33;23H\u001b[K\u001b[34;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[34;28H\u001b[K\u001b[35;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m docker-archive:/tmp/busybox-archive:1.26\u001b[36;2H\u001b[96m\u001b[47m49\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[36;23H\u001b[K\u001b[37;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b" - ], - [ - 5.3e-05, - "[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[37;28H\u001b[K\u001b[38;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[6Cm /tmp/busybox-archive\u001b[38;32H\u001b[K\u001b[39;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[40;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[41;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[42;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[43;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[43;5H\u001b[K\u001b[44;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to oci without compression\"\u001b[m\u001b[93m\u001b[107m {\u001b[44;51H\u001b[K\u001b[45;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[46;2H\u001b[96m\u001b[47m59\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[46;23H\u001b[K\u001b[47;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[47;28H\u001b[K\u001b[48;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/oci-busybox\u001b[49;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[49;23H\u001b[K\u001b[50;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m" - ], - [ - 0.008247, - "\u001b[93m\u001b[107m -eq 0 ]\u001b[50;28H\u001b[K\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  20%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;9H\u001b[?12l\u001b[?25h" - ], - [ - 2.125968, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[1;5H\u001b[K\u001b[2;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to containers/storage\"\u001b[m\u001b[93m\u001b[107m {\u001b[2;47H\u001b[K\u001b[3;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[4;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[4;23H\u001b[K\u001b[5;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 
]\u001b[5;28H\u001b[K\u001b[6;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m containers-storage:[$ROOT]busybox:test\u001b[7;2H\u001b[96m\u001b[47m19\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[7;23H\u001b[K\u001b[8;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[8;28H\u001b[K\u001b[9;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[10;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[10;28H\u001b[K\u001b[11;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${OCIC_BINARY} image remove busybox:test\u001b[12;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b" - ], - [ - 7.1e-05, - "[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[13;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[14;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[15;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[15;5H\u001b[K\u001b[16;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to directory\"\u001b[m\u001b[93m\u001b[107m {\u001b[16;37H\u001b[K\u001b[17;2H\u001b[96m\u001b[47m29\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[18;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[18;23H\u001b[K\u001b[19;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[19;28H\u001b[K\u001b[20;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/busybox\u001b[21;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[21;23H\u001b[K\u001b[22;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[22;28H\u001b[K\u001b[23;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m dir:/tmp/busybox\u001b[24;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[24;23H\u001b[K\u001b[25;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"" - ], - [ - 0.007072, - "\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[25;28H\u001b[K\u001b[26;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[27;2H\u001b[96m\u001b[47m39\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[28;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Crm -rf /tmp/busybox\u001b[29;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[30;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[31;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[31;5H\u001b[K\u001b[32;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push 
to docker archive\"\u001b[m\u001b[93m\u001b[107m {\u001b[32;42H\u001b[K\u001b[33;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[34;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[34;23H\u001b[K\u001b[35;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[35;28H\u001b[K\u001b[36;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m docker-archive:/tmp/busybox-archive:1.26\u001b[37;2H\u001b[96m\u001b[47m49\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[37;23H\u001b[K\u001b[38;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[3" - ], - [ - 4.4e-05, - "6m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[38;28H\u001b[K\u001b[39;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[6Cm /tmp/busybox-archive\u001b[39;32H\u001b[K\u001b[40;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[41;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[42;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[43;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[44;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[44;5H\u001b[K\u001b[45;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to oci without compression\"\u001b[m\u001b[93m\u001b[107m {\u001b[45;51H\u001b[K\u001b[46;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[47;2H\u001b[96m\u001b[47m59\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[47;23H\u001b[K\u001b[48;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[48;28H\u001b[K\u001b[49;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/oci-busybox\u001b[50;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[50;23H\u001b[K\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  18%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m" - ], - [ - 0.002085, - "\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.16733, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[2;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[2;5H\u001b[K\u001b[3;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to containers/storage\"\u001b[m\u001b[93m\u001b[107m {\u001b[3;47H\u001b[K\u001b[4;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[5;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho 
\u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[5;23H\u001b[K\u001b[6;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[6;28H\u001b[K\u001b[7;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m containers-storage:[$ROOT]busybox:test\u001b[8;2H\u001b[96m\u001b[47m19\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[8;23H\u001b[K\u001b[9;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[9;28H\u001b[K\u001b[10;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[11;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[11;28H\u001b[K\u001b[12;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${OCIC_BINARY} image remove busybox:" - ], - [ - 8.8e-05, - "test\u001b[13;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[14;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[15;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[16;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[16;5H\u001b[K\u001b[17;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to directory\"\u001b[m\u001b[93m\u001b[107m {\u001b[17;37H\u001b[K\u001b[18;2H\u001b[96m\u001b[47m29\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[19;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[19;23H\u001b[K\u001b[20;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[20;28H\u001b[K\u001b[21;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/busybox\u001b[22;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[22;23H\u001b[K\u001b[23;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[23;28H\u001b[K\u001b[24;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m dir:/tmp/busybox\u001b[25;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[25;23H\u001b[K\u001b[26;3H\u001b[96m" - ], - [ - 0.007373, - "\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[26;28H\u001b[K\u001b[27;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[28;2H\u001b[96m\u001b[47m39\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[29;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Crm -rf /tmp/busybox\u001b[30;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[1C 
stop_crio\u001b[31;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[32;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[32;5H\u001b[K\u001b[33;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to docker archive\"\u001b[m\u001b[93m\u001b[107m {\u001b[33;42H\u001b[K\u001b[34;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[35;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[35;23H\u001b[K\u001b[36;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[36;28H\u001b[K\u001b[37;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m docker-archive:/tmp/busybox-archive:1.26\u001b[38;2H\u001b[96m\u001b[47m49\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[38;23H\u001b[K\u001b" - ], - [ - 2.1e-05, - "[39;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[39;28H\u001b[K\u001b[40;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[6Cm /tmp/busybox-archive\u001b[40;32H\u001b[K\u001b[41;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[42;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[43;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[44;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[45;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[45;5H\u001b[K\u001b[46;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to oci without compression\"\u001b[m\u001b[93m\u001b[107m {\u001b[46;51H\u001b[K\u001b[47;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[48;2H\u001b[96m\u001b[47m59\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[48;23H\u001b[K\u001b[49;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[49;28H\u001b[K\u001b[50;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/oci-busybox\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  17%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;9H\u001b[?1" - ], - [ - 1e-05, - "2l\u001b[?25h" - ], - [ - 0.244303, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  18%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[5;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.495597, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[6;9H\u001b[1m\u001b[31m\u001b[106m[\u001b[17C]\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  
20%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[6;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.0243, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m[ \u001b[16C]\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  21%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[7;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.030791, - "\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  22%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[8;9H" - ], - [ - 0.034862, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[9;9H\u001b[1m\u001b[31m\u001b[106m[\u001b[17C]\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  23%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m20\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[9;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.025609, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m[ \u001b[16C]\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  24%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[10;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.03403, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[11;9H\u001b[1m\u001b[31m\u001b[106m[\u001b[17C]\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  25%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[11;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.346484, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m[ \u001b[16C]\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  26%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[12;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.422162, - "\u001b[51;209H48\u001b[12;52H" - ], - [ - 0.227309, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[34m-- INSERT --\u001b[m\u001b[93m\u001b[107m\u001b[52;13H\u001b[K" - ], - [ - 0.018402, - "\u001b[51;1H\u001b[1m\u001b[38;5;23m\u001b[48;5;231m INSERT \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;231m\u001b[48;5;31m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m\u001b[51;12H kpod-test-refactor \u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;31m\u001b[51;33H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31mtest/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;31mkpod_push.bats \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[m\u001b[93m\u001b[107m\u001b[51;54H\u001b[38;5;31m\u001b[48;5;24m\u001b[51;55H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;24m                                                                                                                      \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;180H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;188H\u001b[38;5;117m\u001b[48;5;24m 
conf\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;149m\u001b[48;5;31m  26%\u001b[m\u001b[" - ], - [ - 2.9e-05, - "93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;24m\u001b[48;5;117m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 23\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;117m:49 \u001b[12;53H\u001b[?12l\u001b[?25h" - ], - [ - 0.11896, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;52H\u001b[K\u001b[51;53H\u001b[1m\u001b[38;5;220m\u001b[48;5;31m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[51;57H \u001b[m\u001b[93m\u001b[107m\u001b[152C\u001b[38;5;22m\u001b[48;5;117m8\u001b[12;52H\u001b[?12l\u001b[?25h" - ], - [ - 0.496088, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;51H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[12;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.031493, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;50H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[12;50H\u001b[?12l\u001b[?25h" - ], - [ - 0.027468, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;49H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[12;49H\u001b[?12l\u001b[?25h" - ], - [ - 0.033381, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;48H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[12;48H\u001b[?12l\u001b[?25h" - ], - [ - 0.034028, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;47H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[12;47H\u001b[?12l\u001b[?25h" - ], - [ - 0.027933, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;46H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[12;46H\u001b[?12l\u001b[?25h" - ], - [ - 0.032743, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;45H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[12;45H\u001b[?12l\u001b[?25h" - ], - [ - 0.030113, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;44H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m0\u001b[12;44H\u001b[?12l\u001b[?25h" - ], - [ - 0.034589, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;43H\u001b[K\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m39\u001b[12;43H\u001b[?12l\u001b[?25h" - ], - [ - 0.023953, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;42H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[12;42H\u001b[?12l\u001b[?25h" - ], - [ - 0.192014, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;41H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[12;41H\u001b[?12l\u001b[?25h" - ], - [ - 0.74866, - "\u001b[?25l\u001b[51;210H6\u001b[12;40H\u001b[?12l\u001b[?25h" - ], - [ - 0.498543, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;39H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[12;39H\u001b[?12l\u001b[?25h" - ], - [ - 0.025782, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;38H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[12;38H\u001b[?12l\u001b[?25h" - ], - [ - 0.031784, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;37H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[12;37H\u001b[?12l\u001b[?25h" - ], - [ - 0.03173, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;36H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[12;36H\u001b[?12l\u001b[?25h" - ], - [ - 0.029779, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;35H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[12;35H\u001b[?12l\u001b[?25h" - ], - [ - 0.034013, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;34H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m0\u001b[12;34H\u001b[?12l\u001b[?25h" - ], - [ - 0.028916, - "\u001b[?25l\u001b[51;209H29\u001b[12;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.028505, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;32H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[12;32H\u001b[?12l\u001b[?25h" - ], - [ - 0.029212, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;31H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[12;31H\u001b[?12l\u001b[?25h" - ], - [ - 0.035421, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;30H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[12;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.030293, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;29H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[12;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.030509, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;28H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[12;28H\u001b[?12l\u001b[?25h" - ], - [ - 0.029979, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;14H\u001b[1m\u001b[31m\u001b[106m{\u001b[11C}\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[12;27H\u001b[?12l\u001b[?25h" - ], - [ - 0.03119, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;26H\u001b[K\u001b[12;14H{O\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[12;26H\u001b[?12l\u001b[?25h" - ], - [ - 0.032258, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;25H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[12;25H\u001b[?12l\u001b[?25h" - ], - [ - 0.02545, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;24H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m0\u001b[12;24H\u001b[?12l\u001b[?25h" - ], - [ - 0.035798, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;23H\u001b[K\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m19\u001b[12;23H\u001b[?12l\u001b[?25h" - ], - [ - 0.02867, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;22H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[12;22H\u001b[?12l\u001b[?25h" - ], - [ - 0.031704, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;21H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[12;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.0333, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;20H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[12;20H\u001b[?12l\u001b[?25h" - ], - [ - 0.024988, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;19H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[12;19H\u001b[?12l\u001b[?25h" - ], - [ - 0.02881, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;18H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[12;18H\u001b[?12l\u001b[?25h" - ], - [ - 0.033247, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;17H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[12;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.032484, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;16H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[12;16H\u001b[?12l\u001b[?25h" - ], - [ - 0.031784, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;15H\u001b[K\u001b[12;14H\u001b[1m\u001b[31m\u001b[106m{\u001b[15;5H}\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[12;15H\u001b[?12l\u001b[?25h" - ], - [ - 0.453651, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;14H\u001b[K\u001b[15;5H}\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m0\u001b[12;14H\u001b[?12l\u001b[?25h" - ], - [ - 0.192402, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;13H\u001b[K\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m9 \u001b[12;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.437798, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mk\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m10\u001b[12;14H\u001b[?12l\u001b[?25h" - ], - [ - 0.254702, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mp\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[12;15H\u001b[?12l\u001b[?25h" - ], - [ - 0.30839, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;14H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m0\u001b[12;14H\u001b[?12l\u001b[?25h" - ], - [ - 0.132923, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;13H\u001b[K\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m9 \u001b[12;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.318686, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m$\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m10\u001b[12;14H\u001b[?12l\u001b[?25h" - ], - [ - 0.283056, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m(\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[12;15H\u001b[?12l\u001b[?25h" - ], - [ - 0.330165, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;14H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m0\u001b[12;14H\u001b[?12l\u001b[?25h" - ], - [ - 0.516215, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\b\u001b[1m\u001b[31m\u001b[106m{\u001b[15;5H}\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[12;15H\u001b[?12l\u001b[?25h" - ], - [ - 0.658201, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mK\b\b{K\u001b[15;5H}\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[12;16H\u001b[?12l\u001b[?25h" - ], - [ - 0.143261, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mP\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[12;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.123887, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mO\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[12;18H\u001b[?12l\u001b[?25h" - ], - [ - 0.169592, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mD\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[12;19H\u001b[?12l\u001b[?25h" - ], - [ - 0.345485, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m_\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[12;20H\u001b[?12l\u001b[?25h" - ], - [ - 0.181625, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mB\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[12;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.084398, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mI\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[12;22H\u001b[?12l\u001b[?25h" - ], - [ - 0.111056, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mN\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m9\u001b[12;23H\u001b[?12l\u001b[?25h" - ], - [ - 0.128088, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mA\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m20\u001b[12;24H\u001b[?12l\u001b[?25h" - ], - [ - 0.131002, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mR\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[12;25H\u001b[?12l\u001b[?25h" - ], - [ - 0.165526, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mY\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[12;26H\u001b[?12l\u001b[?25h" - ], 
- [ - 0.657525, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m}\u001b[12;14H\u001b[1m\u001b[31m\u001b[106m{\u001b[11C}\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[12;27H\u001b[?12l\u001b[?25h" - ], - [ - 0.183629, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;14H{K\u001b[10C} \u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[12;28H\u001b[?12l\u001b[?25h" - ], - [ - 0.447484, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m$\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[12;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.17344, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mK\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[12;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.115871, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mP\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[12;31H\u001b[?12l\u001b[?25h" - ], - [ - 0.090564, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mO\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[12;32H\u001b[?12l\u001b[?25h" - ], - [ - 0.114158, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mD\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m9\u001b[12;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.141443, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m_\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m30\u001b[12;34H\u001b[?12l\u001b[?25h" - ], - [ - 0.23065, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mO\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[12;35H\u001b[?12l\u001b[?25h" - ], - [ - 0.144279, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mP\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[12;36H\u001b[?12l\u001b[?25h" - ], - [ - 0.096025, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mT\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[12;37H\u001b[?12l\u001b[?25h" - ], - [ - 0.093663, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mI\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[12;38H\u001b[?12l\u001b[?25h" - ], - [ - 0.079633, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mO\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[12;39H\u001b[?12l\u001b[?25h" - ], - [ - 0.081562, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mN\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[12;40H\u001b[?12l\u001b[?25h" - ], - [ - 0.084293, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mS\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[12;41H\u001b[?12l\u001b[?25h" - ], - [ - 0.137772, - "\u001b[?25l\u001b[51;210H8\u001b[12;42H\u001b[?12l\u001b[?25h" - ], - [ - 0.094796, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mr\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m9\u001b[12;43H\u001b[?12l\u001b[?25h" - ], - [ - 0.14112, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mm\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m40\u001b[12;44H\u001b[?12l\u001b[?25h" - ], - [ - 0.100405, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mi\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[12;45H\u001b[?12l\u001b[?25h" - ], - [ - 0.104584, - "\u001b[?25l\u001b[51;210H2\u001b[12;46H\u001b[?12l\u001b[?25h" - ], - [ - 0.138653, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m$\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[12;47H\u001b[?12l\u001b[?25h" - ], - [ - 0.344074, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;46H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[12;46H\u001b[?12l\u001b[?25h" - ], - [ - 0.199704, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\"\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[12;47H\u001b[?12l\u001b[?25h" - ], - [ - 0.127088, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m$\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[12;48H\u001b[?12l\u001b[?25h" 
- ], - [ - 0.143183, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mK\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[12;49H\u001b[?12l\u001b[?25h" - ], - [ - 0.328141, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;48H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[12;48H\u001b[?12l\u001b[?25h" - ], - [ - 0.170702, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mI\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[12;49H\u001b[?12l\u001b[?25h" - ], - [ - 0.084435, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mM\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[12;50H\u001b[?12l\u001b[?25h" - ], - [ - 0.098537, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mA\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[12;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.163497, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mG\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[12;52H\u001b[?12l\u001b[?25h" - ], - [ - 0.105928, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mE\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m9\u001b[12;53H\u001b[?12l\u001b[?25h" - ], - [ - 0.145464, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;46H\u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m50\u001b[12;54H\u001b[?12l\u001b[?25h" - ], - [ - 0.391443, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K" - ], - [ - 0.007246, - "\u001b[51;1H\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;31m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H kpod-test-refactor \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;33H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mtest/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mkpod_push.bats\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[m\u001b[93m\u001b[107m\u001b[51;56H\u001b[38;5;240m\u001b[48;5;236m\u001b[51;57H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                    \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;180H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;188H\u001b[38;5;247m\u001b[48;5;236m conf\u001b" - ], - [ - 2.4e-05, - "[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  26%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 23\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;252m:49 \u001b[12;53H\u001b[?12l\u001b[?25h" - ], - [ - 0.297107, - "\u001b[?25l\u001b[52;1H\u001b[m\u001b[93m\u001b[107m:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.311391, - "w\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.071982, - "q" - ], - [ - 7.5e-05, - "\u001b[?25l\u001b[?12l\u001b[?25h" - ], - 
[ - 0.127973, - "\r" - ], - [ - 0.000142, - "\u001b[?25l\u001b[?2004l" - ], - [ - 0.000165, - "\"test/kpod_push.bats\"" - ], - [ - 0.006449, - " 87L, 2372C written" - ], - [ - 0.013487, - "\r\r\r\n\u001b[39;49m\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.002115, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.022207, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-test-refactor* \u001b[39m \u001b[33m25s\u001b[39m\r\n" - ], - [ - 0.000932, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000102, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000173, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.1e-05, - "\u001b[?1h\u001b=" - ], - [ - 1.9e-05, - "\u001b[?2004h" - ], - [ - 0.394575, - "g" - ], - [ - 0.216017, - "\bgi" - ], - [ - 0.088382, - "t" - ], - [ - 0.103545, - " " - ], - [ - 0.128191, - "c" - ], - [ - 0.000178, - "o" - ], - [ - 0.079694, - "m" - ], - [ - 0.279792, - "m" - ], - [ - 0.248528, - "i" - ], - [ - 0.111816, - "t" - ], - [ - 0.087626, - " " - ], - [ - 0.128727, - "-" - ], - [ - 0.095251, - "a" - ], - [ - 0.096123, - " " - ], - [ - 0.104484, - "-" - ], - [ - 0.143571, - "-" - ], - [ - 0.087942, - "a" - ], - [ - 0.104175, - "m" - ], - [ - 0.112113, - "e" - ], - [ - 0.103848, - "n" - ], - [ - 0.112584, - "d" - ], - [ - 0.247762, - "\u001b[?1l\u001b>" - ], - [ - 0.000208, - "\u001b[?2004l\r\r\n" - ], - [ - 0.005064, - "\u001b]2;git commit -a --amend\u0007\u001b]1;git\u0007" - ], - [ - 0.032749, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000317, - "\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"~/Development/Go/src/github.com/kubernetes-incubator/cri-o/.git/COMMIT_EDITMSG\"" - ], - [ - 5.3e-05, - " 24L, 716C" - ], - [ - 0.000154, - "\u001b[1;1HRefactor kpod tests\r\n\r\nMove kpod tests from kpod.bats to kpod_[commandname].bats\r\n\r\nSigned-off-by: Ryan Cole \r\n\r\n# Please enter the commit message for your changes. 
Lines starting\r\n# with '#' will be ignored, and an empty message aborts the commit.\r\n#\r\n# Date: Mon Aug 14 09:15:22 2017 -0400\r\n#\r\n# On branch kpod-test-refactor\r\n# Changes to be committed:\r\n#\u001b[7Cdeleted: test/kpod.bats\r\n#\u001b[7Cmodified: test/kpod_diff.bats\r\n#\u001b[7Cnew file: test/kpod_history.bats\r\n#\u001b[7Cnew file: test/kpod_images.bats\r\n#\u001b[7Cnew file: test/kpod_inspect.bats\r\n#\u001b[7Cmodified: test/kpod_load.bats\r\n#\u001b[7Cnew file: test/kpod_pull.bats\r\n#\u001b[7Cnew file: test/kpod_push.bats\r\n#\u001b[7Cmodified: test/kpod_save.bats\r\n#\u001b[7Cnew file: test/kpod_version.bats\r\n#\r\n\u001b[94m~ \u001b[26;1H~ " - ], - [ - 2.5e-05, - " \u001b[27;1H~ \u001b[28;1H~ \u001b[29;1H~ \u001b[30;1H~ " - ], - [ - 5.1e-05, - " \u001b[31;1H~ \u001b[32;1H~ \u001b[33;1H~ \u001b[34;1H~ \u001b[35;1H~ " - ], - [ - 1.9e-05, - " \u001b[36;1H~ \u001b[37;1H~ \u001b[38;1H~ \u001b[39;1H~ \u001b[40;1H~ " - ], - [ - 1.7e-05, - " \u001b[41;1H~ \u001b[42;1H~ \u001b[43;1H~ \u001b[44;1H~ " - ], - [ - 1.6e-05, - " \u001b[45;1H~ \u001b[46;1H~ \u001b[47;1H~ \u001b[48;1H~ \u001b[49;1H~ " - ], - [ - 1.8e-05, - " \u001b[50;1H~ \u001b[51;1H~ \u001b[1;1H\u001b[?12l\u001b[?25h" - ], - [ - 0.25699, - "\u001b[?25l\u001b[m\u001b[52;1H\u001b[K\u001b[52;1H:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.159615, - "w" - ], - [ - 0.06399, - "q" - ], - [ - 0.072292, - "\r\u001b[?25l\u001b[?2004l\".git/COMMIT_EDITMSG\"" - ], - [ - 0.006984, - " 24L, 716C written" - ], - [ - 0.000191, - "\r\r\r\n\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.004761, - "[kpod-test-refactor 72c6c49b] Refactor kpod tests\r\n Date: Mon Aug 14 09:15:22 2017 -0400\r\n 10 files changed, 335 insertions(+), 253 deletions(-)\r\n delete mode 100644 test/kpod.bats\r\n create mode 100644 test/kpod_history.bats\r\n create mode 100644 test/kpod_images.bats\r\n create mode 100644 test/kpod_inspect.bats\r\n create mode 100644 test/kpod_pull.bats\r\n create mode 100644 test/kpod_push.bats\r\n create mode 100644 test/kpod_version.bats\r\n\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.02706, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-test-refactor \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.003213, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000199, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.9e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000122, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 5.3e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.6e-05, - "\u001b[?2004h" - ], - [ - 0.141189, - "g" - ], - [ - 0.128024, - "\bgi" - ], - [ - 0.071759, - "t" - ], - [ - 0.10427, - " " - ], - [ - 0.096092, - "p" - ], - [ - 0.09589, - "u" - ], - [ - 0.112424, - "s" - ], - [ - 0.1201, - "h" - ], - [ - 0.111109, - " " - ], - [ - 0.088607, - "-" - ], - [ - 0.119704, - "f" - ], - [ - 0.128014, - " " - ], - [ - 0.11191, - "o" - ], - [ - 0.112657, - "r" - ], - [ - 0.103811, - "i" - ], - [ - 0.119689, - "g" - ], - [ - 0.096661, - "i" - ], - [ - 0.039472, - "n" - ], - [ - 0.095541, - " " - ], - [ - 0.12086, - "k" - ], - [ - 0.085614, - "pod-" - ], - [ - 0.185148, - "t" - ], - [ - 0.048017, - "e" - ], - [ - 0.205899, - "st-refactor\u001b[1m \u001b[0m" - ], - [ - 0.179325, - "\b\u001b[0m r" - ], - [ - 0.112012, - "e" - ], - [ - 0.271558, - 
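For reference, the body of one of these tests can be read back from the recording's frames. The following is a reconstruction of the "kpod push to directory" test as it appears on screen; indentation is approximate, so treat it as a sketch of the visible content rather than the authoritative file:

```bash
@test "kpod push to directory" {
	# bats' `run` helper captures the command's exit code in $status
	# and its combined output in $output.
	run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
	echo "$output"
	[ "$status" -eq 0 ]
	run mkdir /tmp/busybox
	echo "$output"
	[ "$status" -eq 0 ]
	# Push the pulled image to a local directory transport.
	run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" dir:/tmp/busybox
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
	[ "$status" -eq 0 ]
	rm -rf /tmp/busybox
	stop_crio
}
```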
"\b \b" - ], - [ - 0.127775, - "\b \b" - ], - [ - 0.160077, - "\b" - ], - [ - 0.144317, - "\b \b" - ], - [ - 0.455941, - "r" - ], - [ - 0.336133, - "\u001b[?1l\u001b>" - ], - [ - 0.001355, - "\u001b[?2004l\r\r\n" - ], - [ - 0.003918, - "\u001b]2;git push -f origin kpod-test-refactor\u0007\u001b]1;git\u0007" - ], - [ - 1.183521, - "Counting objects: 12, done.\r\n" - ], - [ - 0.000161, - "Delta compression using up to 4 threads.\r\n" - ], - [ - 8.9e-05, - "Compressing objects: 8% (1/12) \rCompressing objects: 16% (2/12) \r" - ], - [ - 0.00016, - "Compressing objects: 25% (3/12) \r" - ], - [ - 2.3e-05, - "Compressing objects: 33% (4/12) \r" - ], - [ - 3.7e-05, - "Compressing objects: 41% (5/12) \r" - ], - [ - 6.9e-05, - "Compressing objects: 50% (6/12) \r" - ], - [ - 0.000102, - "Compressing objects: 58% (7/12) \r" - ], - [ - 5.4e-05, - "Compressing objects: 66% (8/12) \r" - ], - [ - 8.2e-05, - "Compressing objects: 75% (9/12) \r" - ], - [ - 5.7e-05, - "Compressing objects: 83% (10/12) \r" - ], - [ - 5.8e-05, - "Compressing objects: 91% (11/12) \r" - ], - [ - 5.4e-05, - "Compressing objects: 100% (12/12) \r" - ], - [ - 4.4e-05, - "Compressing objects: 100% (12/12), done.\r\n" - ], - [ - 0.000167, - "Writing objects: 8% (1/12) \r" - ], - [ - 5.7e-05, - "Writing objects: 16% (2/12) \r" - ], - [ - 0.000171, - "Writing objects: 25% (3/12) \r" - ], - [ - 5e-05, - "Writing objects: 33% (4/12) \r" - ], - [ - 0.000129, - "Writing objects: 41% (5/12) \r" - ], - [ - 0.000182, - "Writing objects: 58% (7/12) \r" - ], - [ - 5.9e-05, - "Writing objects: 66% (8/12) \r" - ], - [ - 7e-05, - "Writing objects: 75% (9/12) \r" - ], - [ - 7.9e-05, - "Writing objects: 83% (10/12) \r" - ], - [ - 3.8e-05, - "Writing objects: 91% (11/12) \r" - ], - [ - 4.3e-05, - "Writing objects: 100% (12/12) \r" - ], - [ - 4.1e-05, - "Writing objects: 100% (12/12), 2.57 KiB | 2.57 MiB/s, done.\r\nTotal 12 (delta 9), reused 0 (delta 0)\r\n" - ], - [ - 0.086353, - "remote: Resolving deltas: 0% (0/9) \u001b[K\r" - ], - [ - 0.040457, - "remote: Resolving deltas: 22% (2/9) \u001b[K\rremote: Resolving deltas: 44% (4/9) \u001b[K\rremote: Resolving deltas: 55% (5/9) \u001b[K\rremote: Resolving deltas: 66% (6/9) \u001b[K\rremote: Resolving deltas: 77% (7/9) \u001b[K\rremote: Resolving deltas: 88% (8/9) \u001b[K\rremote: Resolving deltas: 100% (9/9) \u001b[K\rremote: Resolving deltas: 100% (9/9), completed with 5 local objects.\u001b[K\r\n" - ], - [ - 1.343638, - "To github.com:14rcole/cri-o\r\n + 9327604d...72c6c49b kpod-test-refactor -> kpod-test-refactor (forced update)\r\n" - ], - [ - 0.003519, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.027554, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-test-refactor \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.002119, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000223, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007" - ], - [ - 2.1e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 8.9e-05, - "\u001b[?1h\u001b=" - ], - [ - 3.1e-05, - "\u001b[?2004h" - ], - [ - 73.029405, - "g" - ], - [ - 0.116068, - "\bgi" - ], - [ - 0.083652, - "t" - ], - [ - 0.111933, - " " - ], - [ - 0.116198, - "c" - ], - [ - 0.095668, - "h" - ], - [ - 0.100371, - "e" - ], - [ - 0.075276, - "c" - ], - [ - 0.069067, - "k" - ], - [ - 0.155349, - "o" 
Next, the `kpod-rename` branch is checked out and the new test file is opened in the editor:

```
❯ git checkout kpod-rename
Switched to branch 'kpod-rename'
❯ vi test/kpod_rename.bats
```

`test/kpod_rename.bats` as shown in vim (35L, 907C):

```bash
#!/usr/bin/env bats

load helpers

IMAGE="redis:alpine"
ROOT="$TESTDIR/crio"
RUNROOT="$TESTDIR/crio-run"
KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS"
NEW_NAME="rename-test"

function teardown() {
    cleanup_test
}

@test "kpod rename successful" {
    start_crio
    run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
    [ "$status" -eq 0 ]
    run crioctl pod run --config "$TESTDATA"/sandbox_config.json
    echo "$output"
    pod_id="$output"
    [ "$status" -eq 0 ]
    run ${OCIC_BINARY} ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
    ctr_id="$output"
    run ${KPOD_BINARY} $KPOD_OPTIONS rename "$ctr_id" "$NEW_NAME"
    echo "$output"
    [ "$status" -eq 0 ]
    run ${KPOD_BINARY} $KPOD_OPTIONS inspect "$ctr_id" --format {{.Name}}
    echo "$output"
    [ "$status" -eq 0 ]
    [ "$output" == "$NEW_NAME" ]
    cleanup_ctrs
    cleanup_pods
    stop_crio
}
```
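The test drives the new `rename` subcommand end to end; the implementation itself lives in `libkpod/rename.go`, which the recording never opens. For orientation only, a rename on top of containers/storage could look like the sketch below. `store.Container` and `store.SetNames` are real containers/storage calls, but this helper, its name, and its error texts are illustrative assumptions, not the committed code.

```go
// Illustrative sketch only -- the real libkpod/rename.go is not shown
// in the recording; this helper and its error texts are assumptions.
package libkpod

import (
	"github.com/containers/storage"
	"github.com/pkg/errors"
)

// renameContainer points the container at a single new name by
// rewriting its name list in containers/storage.
func renameContainer(store storage.Store, idOrName, newName string) error {
	ctr, err := store.Container(idOrName)
	if err != nil {
		return errors.Wrapf(err, "could not find container %q", idOrName)
	}
	// SetNames replaces all existing names with the supplied list.
	if err := store.SetNames(ctr.ID, []string{newName}); err != nil {
		return errors.Wrapf(err, "could not rename container %q", idOrName)
	}
	return nil
}
```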
Inside vim, line 23 of the test is corrected: the leftover `${OCIC_BINARY}` is deleted and retyped as `crioctl`, matching the `crioctl pod run` step above it, and the buffer is written back out (`"test/kpod_rename.bats" 35L, 900C written`):

```
run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
```

Back at the shell, the commit is amended with `git commit -a --amend`.
The commit-message editor shows:

```
implement kpod rename

rename a container

Signed-off-by: Ryan Cole

# Please enter the commit message for your changes. Lines starting
# with '#' will be ignored, and an empty message aborts the commit.
#
# Date: Mon Aug 14 13:30:24 2017 -0400
#
# On branch kpod-rename
# Changes to be committed:
#       modified:   cmd/kpod/common.go
#       modified:   cmd/kpod/main.go
#       new file:   cmd/kpod/rename.go
#       modified:   completions/bash/kpod
#       new file:   docs/kpod-rename.1.md
#       modified:   docs/kpod.1.md
#       new file:   libkpod/rename.go
#       modified:   oci/container.go
#       new file:   test/kpod_rename.bats
```

Writing it out produces:

```
[kpod-rename 1eb21f8e] implement kpod rename
 Date: Mon Aug 14 13:30:24 2017 -0400
 9 files changed, 261 insertions(+), 1 deletion(-)
 create mode 100644 cmd/kpod/rename.go
 create mode 100644 docs/kpod-rename.1.md
 create mode 100644 libkpod/rename.go
 create mode 100644 test/kpod_rename.bats
```
The amended branch is force-pushed, and after a long idle stretch the running container is listed:

```
❯ git push -f origin kpod-rename
Counting objects: 19, done.
Delta compression using up to 4 threads.
Compressing objects: 100% (17/17), done.
Writing objects: 100% (19/19), 3.69 KiB | 3.69 MiB/s, done.
Total 19 (delta 12), reused 0 (delta 0)
remote: Resolving deltas: 100% (12/12), completed with 12 local objects.
To github.com:14rcole/cri-o
 + 2a874565...1eb21f8e kpod-rename -> kpod-rename (forced update)
❯ sudo crioctl ctr list
[sudo] password for ryan:
ID: d0ddf13569c69f6fed1934eb0e5e9d41b0cf37477c3d42c63931e9b9d2e8ab41
Pod: 75d9cdb9e450cd7b67e71136c35e6fa850edfd783b1893f61a69659e2eb8cb77
Name: podsandbox1-redis
Attempt: 0
Status: CONTAINER_RUNNING
Image: redis:alpine
Created: 2017-08-14 14:45:11.278672966 -0400 EDT
Labels:
	tier -> backend
Annotations:
	pod -> podsandbox1
```
Removing the image that container still references is refused unless forced; note the stray `%!(EXTRA int=0)` tacked onto the error, which the rest of the session goes on to address:

```
❯ sudo kpod rmi redis:alpine
Could not remove image "redis:alpine" (must force) - one or more containers are using its reference image%!(EXTRA int=0)
❯ git checkout master
Switched to branch 'master'
Your branch is up-to-date with 'origin/master'.
```
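The trailing `%!(EXTRA int=0)` is Go's fmt package reporting a surplus operand: the format string consumed fewer arguments than were supplied, and the `int` type hints that an integer (likely a loop index) was passed where a container ID string was intended. A minimal reproduction:

```go
package main

import "fmt"

func main() {
	// Two operands, but only one %q verb: fmt keeps the message and
	// appends a %!(EXTRA type=value) marker for the leftover operand.
	err := fmt.Errorf("Could not remove image %q (must force) - one or more containers are using its reference image", "redis:alpine", 0)
	fmt.Println(err)
	// Prints:
	// Could not remove image "redis:alpine" (must force) - one or more containers are using its reference image%!(EXTRA int=0)
}
```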
"-" - ], - [ - 0.151817, - "-" - ], - [ - 0.088524, - "r" - ], - [ - 0.32749, - "\b \b" - ], - [ - 0.14449, - "\b \b" - ], - [ - 0.151997, - "\b \b" - ], - [ - 0.679562, - "u" - ], - [ - 0.05667, - "p" - ], - [ - 0.147276, - "stream\u001b[1m \u001b[0m" - ], - [ - 0.324463, - "\b\u001b[0m m" - ], - [ - 0.088344, - "a" - ], - [ - 0.096029, - "s" - ], - [ - 0.187689, - "ter\u001b[1m:\u001b[0m" - ], - [ - 0.091664, - "\b\u001b[0m \b" - ], - [ - 0.000172, - "\u001b[?1l\u001b>" - ], - [ - 0.000159, - "\u001b[?2004l\r\r\n" - ], - [ - 0.004194, - "\u001b]2;git pull upstream master\u0007\u001b]1;git\u0007" - ], - [ - 1.697902, - "From github.com:kubernetes-incubator/cri-o\r\n * branch master -> FETCH_HEAD\r\n" - ], - [ - 0.057748, - "Updating a69631c1..6ca462a3\r\n" - ], - [ - 0.026098, - "Fast-forward" - ], - [ - 4.3e-05, - "\r\n" - ], - [ - 0.005314, - " README.md | 3 \u001b[32m+\u001b[m\u001b[31m-\u001b[m\r\n cmd/kpod/logs.go | 88 \u001b[32m+++++++++++++++++++++++\u001b[m\r\n cmd/kpod/main.go | 1 \u001b[32m+\u001b[m\r\n code-of-conduct.md | 20 \u001b[32m+++\u001b[m\u001b[31m---\u001b[m\r\n" - ], - [ - 0.000167, - " completions/bash/kpod | 25 \u001b[32m+++++++\u001b[m\r\n docs/kpod-logs.1.md | 61 \u001b[32m++++++++++++++++\u001b[m\r\n libkpod/image/copy.go | 18 \u001b[32m++++\u001b[m\u001b[31m-\u001b[m\r\n libkpod/logs.go | 80 \u001b[32m+++++++++++++++++++++\u001b[m\r\n test/kpod_logs.bats | 77 \u001b[32m++++++++++++++++++++\u001b[m\r\n vendor.conf | 3 \u001b[32m+\u001b[m\r\n vendor/github.com/hpcloud/tail/LICENSE.txt | 21 \u001b[32m++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/README.md | 28 \u001b[32m++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go | 97 \u001b[32m+++++++++++++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/ratelimiter/memory.go | 58 \u001b[32m+++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/ratelimiter/storage.go | 6 \u001b[32m++\u001b[m\r\n vendor/github.com/hpcloud/tail/tail.go | 438 " - ], - [ - 2.2e-05, - "\u001b[32m+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/tail_posix.go | 11 \u001b[32m+++\u001b[m\r\n vendor/github.com/hpcloud/tail/tail_windows.go | 12 \u001b[32m++++\u001b[m\r\n vendor/github.com/hpcloud/tail/util/util.go | 48 \u001b[32m+++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/watch/filechanges.go | 36 \u001b[32m++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/watch/inotify.go | 128 \u001b[32m+++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/watch/inotify_tracker.go | 260 \u001b[32m+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/watch/polling.go | 118 \u001b[32m+++++++++++++++++++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/watch/watch.go | 20 \u001b[32m++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/winfile/winfile.go | 92 \u001b[32m++++++++++++++++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/LICENS" - ], - [ - 1.7e-05, - "E | 28 \u001b[32m++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/README.md | 50 \u001b[32m+++++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/fen.go | 37 \u001b[32m++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/fsnotify.go | 62 \u001b[32m++++++++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/inotify.go | 325 \u001b[32m++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n 
vendor/gopkg.in/fsnotify.v1/inotify_poller.go | 187 \u001b[32m+++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/kqueue.go | 503 \u001b[32m++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11 \u001b[32m+++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go | 12 \u001b[32m++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/windows.go | 561 \u001b[" - ], - [ - 1.6e-05, - "32m+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/gopkg.in/tomb.v1/LICENSE | 29 \u001b[32m++++++++\u001b[m\r\n vendor/gopkg.in/tomb.v1/README.md | 4 \u001b[32m++\u001b[m\r\n vendor/gopkg.in/tomb.v1/tomb.go | 176 \u001b[32m++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n 38 files changed, 3722 insertions(+), 12 deletions(-)\r\n create mode 100644 cmd/kpod/logs.go\r\n create mode 100644 docs/kpod-logs.1.md\r\n create mode 100644 libkpod/logs.go\r\n create mode 100644 test/kpod_logs.bats\r\n create mode 100644 vendor/github.com/hpcloud/tail/LICENSE.txt\r\n create mode 100644 vendor/github.com/hpcloud/tail/README.md\r\n create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/memory.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/storage.go\r\n create mode 100644 ve" - ], - [ - 0.000117, - "ndor/github.com/hpcloud/tail/tail.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/tail_posix.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/tail_windows.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/util/util.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/watch/filechanges.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/watch/inotify.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/watch/inotify_tracker.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/watch/polling.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/watch/watch.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/winfile/winfile.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/LICENSE\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/README.md\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/fen.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/fsnotify.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify_poller.go\r\n " - ], - [ - 3.1e-05, - "create mode 100644 vendor/gopkg.in/fsnotify.v1/kqueue.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/windows.go\r\n create mode 100644 vendor/gopkg.in/tomb.v1/LICENSE\r\n create mode 100644 vendor/gopkg.in/tomb.v1/README.md\r\n create mode 100644 vendor/gopkg.in/tomb.v1/tomb.go\r\n" - ], - [ - 0.001059, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.038598, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001722, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]2;ryan@localhost: 
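The vendoring of hpcloud/tail is what lets `kpod logs` follow a container's log file as it grows. The recording does not show `libkpod/logs.go` itself, so the block below is only a minimal sketch of following a file with the vendored hpcloud/tail API; the log path is a made-up example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hpcloud/tail"
)

func main() {
	// Follow the file like `tail -f`; hypothetical path for the demo.
	t, err := tail.TailFile("/tmp/ctr.log", tail.Config{Follow: true})
	if err != nil {
		log.Fatal(err)
	}
	// New lines arrive on the Lines channel as the file is appended to.
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
}
```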
With master updated, the offending error string is tracked down in the source:

```
❯ vi cmd/kpod/rmi.go
```

`cmd/kpod/rmi.go` (123L, 3096C) as shown on screen, around the in-use check:

```go
	rmiCommand = cli.Command{
		Name:        "rmi",
		Usage:       "removes one or more images from local storage",
		Description: rmiDescription,
		Action:      rmiCmd,
		ArgsUsage:   "IMAGE-NAME-OR-ID [...]",
		Flags:       rmiFlags,
	}
)

func rmiCmd(c *cli.Context) error {

	force := false
	if c.IsSet("force") {
		force = c.Bool("force")
	}

	args := c.Args()
	if len(args) == 0 {
		return errors.Errorf("image name or ID must be specified")
	}

	config, err := getConfig(c)
	if err != nil {
		return errors.Wrapf(err, "Could not get config")
	}
	store, err := getStore(config)
	if err != nil {
		return err
	}

	for _, id := range args {
		image, err := libkpodimage.FindImage(store, id)
		if err != nil {
			return errors.Wrapf(err, "could not get image %q", id)
		}
		if image != nil {
			ctrIDs, err := runningContainers(image, store)
			if err != nil {
				return errors.Wrapf(err, "error getting running containers for image %q", id)
			}
			if len(ctrIDs) > 0 && len(image.Names) <= 1 {
				if force {
					removeContainers(ctrIDs, store)
				} else {
					for ctrID := range ctrIDs {
						return fmt.Errorf("Could not remove image %q (must force) - container %q is using its reference image", id, ctrID)
					}
				}
			}
		}
	}
```
"\u001b[?25l\u001b[m\u001b[93m\u001b[107mf\u001b[1m\u001b[31m\u001b[106m(\u001b[95C)\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;252m2\u001b[47;46H\u001b[?12l\u001b[?25h" - ], - [ - 0.028925, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[94C)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;252m3\u001b[47;47H\u001b[?12l\u001b[?25h" - ], - [ - 0.030179, - "\u001b[51;210H4\u001b[47;48H" - ], - [ - 0.030092, - "\u001b[51;210H5\u001b[47;49H" - ], - [ - 0.03015, - "\u001b[51;210H6\u001b[47;50H" - ], - [ - 0.029603, - "\u001b[51;210H7\u001b[47;51H" - ], - [ - 0.032153, - "\u001b[51;210H8\u001b[47;52H" - ], - [ - 0.030144, - "\u001b[51;210H9\u001b[47;53H" - ], - [ - 0.03006, - "\u001b[51;209H50\u001b[47;54H" - ], - [ - 0.030687, - "\u001b[51;210H1\u001b[47;55H" - ], - [ - 0.02981, - "\u001b[51;210H2\u001b[47;56H" - ], - [ - 0.037596, - "\u001b[51;210H3\u001b[47;57H" - ], - [ - 0.027628, - "\u001b[51;210H4\u001b[47;58H" - ], - [ - 0.026376, - "\u001b[51;210H5\u001b[47;59H" - ], - [ - 0.031146, - "\u001b[51;210H6\u001b[47;60H" - ], - [ - 0.032485, - "\u001b[51;210H7\u001b[47;61H" - ], - [ - 0.026451, - "\u001b[51;210H8\u001b[47;62H" - ], - [ - 0.030128, - "\u001b[51;210H9\u001b[47;63H" - ], - [ - 0.031166, - "\u001b[51;209H60\u001b[47;64H" - ], - [ - 0.036072, - "\u001b[51;210H1\u001b[47;65H" - ], - [ - 0.024924, - "\u001b[51;210H2\u001b[47;66H" - ], - [ - 0.032987, - "\u001b[51;210H3\u001b[47;67H" - ], - [ - 0.029506, - "\u001b[51;210H4\u001b[47;68H" - ], - [ - 0.031373, - "\u001b[51;210H5\u001b[47;69H" - ], - [ - 0.033657, - "\u001b[51;210H6\u001b[47;70H" - ], - [ - 0.02801, - "\u001b[51;210H7\u001b[47;71H" - ], - [ - 0.031093, - "\u001b[51;210H8\u001b[47;72H" - ], - [ - 0.0306, - "\u001b[51;210H9\u001b[47;73H" - ], - [ - 0.034356, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[1m\u001b[31m\u001b[106m(\u001b[10C)\u001b[m\u001b[93m\u001b[107m\u001b[51;209H\u001b[38;5;22m\u001b[48;5;252m70\u001b[47;74H\u001b[?12l\u001b[?25h" - ], - [ - 0.026368, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36m(m\u001b[9C) \u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;252m1\u001b[47;75H\u001b[?12l\u001b[?25h" - ], - [ - 0.031718, - "\u001b[51;210H2\u001b[47;76H" - ], - [ - 0.032486, - "\u001b[51;210H3\u001b[47;77H" - ], - [ - 0.025168, - "\u001b[51;210H4\u001b[47;78H" - ], - [ - 0.036429, - "\u001b[51;210H5\u001b[47;79H" - ], - [ - 0.028144, - "\u001b[51;210H6\u001b[47;80H" - ], - [ - 0.031687, - "\u001b[51;210H7\u001b[47;81H" - ], - [ - 0.031436, - "\u001b[51;210H8\u001b[47;82H" - ], - [ - 0.028164, - "\u001b[51;210H9\u001b[47;83H" - ], - [ - 0.033254, - "\u001b[51;209H80\u001b[47;84H" - ], - [ - 0.033571, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[47;74H\u001b[1m\u001b[31m\u001b[106m(\u001b[10C)\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;252m1\u001b[47;85H\u001b[?12l\u001b[?25h" - ], - [ - 0.028404, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[47;74H\u001b[36m(m\u001b[9C) \u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;252m2\u001b[47;86H\u001b[?12l\u001b[?25h" - ], - [ - 0.029114, - "\u001b[51;210H3\u001b[47;87H" - ], - [ - 0.030981, - "\u001b[51;210H4\u001b[47;88H" - ], - [ - 0.034571, - "\u001b[51;210H5\u001b[47;89H" - ], - [ - 0.02456, - "\u001b[51;210H6\u001b[47;90H" - ], - [ - 0.424165, - "\u001b[51;210H5\u001b[47;89H" - ], - [ - 0.191612, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36montainer 
\u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;142H\u001b[K\u001b[51;37H\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;41H \u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.501563, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mntainer \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;141H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.019326, - "\u001b[?25l\u001b[36mtainer \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;140H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.032305, - "\u001b[?25l\u001b[36mainer \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;139H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.027042, - "\u001b[?25l\u001b[36miner \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;138H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.033786, - "\u001b[?25l\u001b[36mner \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;137H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.032065, - "\u001b[?25l\u001b[36mer \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;136H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.02938, - "\u001b[?25l\u001b[36mr \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;135H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.031174, - "\u001b[?25l\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;134H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.032572, - "\u001b[?25l\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;133H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.025383, - "\u001b[?25l\u001b[36mq is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;132H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.209138, - "\u001b[?25l\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;131H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.199764, - "\u001b[?25l\u001b[36mis using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;130H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.172621, - "\u001b[?25l\u001b[36ms using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;129H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.186391, - "\u001b[?25l\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;128H\u001b[K\u001b[47;89H\u001b[?12l\u001b[?25h" - ], - 
[ - 0.397295, - "\u001b[?25l\u001b[52;1H\u001b[34m-- INSERT --\u001b[m\u001b[93m\u001b[107m\u001b[52;13H\u001b[K" - ], - [ - 0.04224, - "\u001b[51;1H\u001b[1m\u001b[38;5;23m\u001b[48;5;231m INSERT \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;231m\u001b[48;5;31m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;31m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;31mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[38;5;220m\u001b[48;5;31m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[m\u001b[93m\u001b[107m\u001b[51;40H\u001b[38;5;31m\u001b[48;5;24m\u001b[51;41H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;24m                                                                                                                                      \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;117m\u001b[48;5;24m" - ], - [ - 3.5e-05, - " go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;186m\u001b[48;5;31m  54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;24m\u001b[48;5;117m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;117m:85 \u001b[47;89H\u001b[?12l\u001b[?25h" - ], - [ - 0.318684, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mo using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[47;90H\u001b[?12l\u001b[?25h" - ], - [ - 0.085993, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mn using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[47;91H\u001b[?12l\u001b[?25h" - ], - [ - 0.102417, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36me using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[47;92H\u001b[?12l\u001b[?25h" - ], - [ - 0.096152, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m9\u001b[47;93H\u001b[?12l\u001b[?25h" - ], - [ - 0.172551, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mm using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m90\u001b[47;94H\u001b[?12l\u001b[?25h" - ], - [ - 0.084589, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mo using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[47;95H\u001b[?12l\u001b[?25h" - ], - [ - 0.266563, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, 
ctrID)\u001b[47;133H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m0\u001b[47;94H\u001b[?12l\u001b[?25h" - ], - [ - 0.134027, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[47;132H\u001b[K\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m89\u001b[47;93H\u001b[?12l\u001b[?25h" - ], - [ - 0.160501, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mo using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m90\u001b[47;94H\u001b[?12l\u001b[?25h" - ], - [ - 0.158121, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mr using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[47;95H\u001b[?12l\u001b[?25h" - ], - [ - 0.064495, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[47;96H\u001b[?12l\u001b[?25h" - ], - [ - 0.178203, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mm using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[47;97H\u001b[?12l\u001b[?25h" - ], - [ - 0.070967, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mo using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[47;98H\u001b[?12l\u001b[?25h" - ], - [ - 0.049728, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mr using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[47;99H\u001b[?12l\u001b[?25h" - ], - [ - 0.085073, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36me using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[47;100H\u001b[?12l\u001b[?25h" - ], - [ - 0.064728, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[47;101H\u001b[?12l\u001b[?25h" - ], - [ - 0.115396, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mc using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[47;102H\u001b[?12l\u001b[?25h" - ], - [ - 0.109608, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mo using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m9\u001b[47;103H\u001b[?12l\u001b[?25h" - ], - [ - 0.07234, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mn using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;175H\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;181H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;189H\u001b[38;5;117m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;194H\u001b[38;5;186m\u001b[48;5;31m \u001b[1C54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[51;201H\u001b[38;5;24m\u001b[48;5;117m \u001b[51;203H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 
66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:100\u001b[47;104H\u001b[?12l\u001b[?25h" - ], - [ - 0.106017, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mt using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[47;105H\u001b[?12l\u001b[?25h" - ], - [ - 0.037147, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36ma using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[47;106H\u001b[?12l\u001b[?25h" - ], - [ - 0.120393, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mi using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[47;107H\u001b[?12l\u001b[?25h" - ], - [ - 0.054877, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mn using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[47;108H\u001b[?12l\u001b[?25h" - ], - [ - 0.055511, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36me using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[47;109H\u001b[?12l\u001b[?25h" - ], - [ - 0.044523, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mr using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[47;110H\u001b[?12l\u001b[?25h" - ], - [ - 0.164345, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36ms using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[47;111H\u001b[?12l\u001b[?25h" - ], - [ - 0.113242, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[47;112H\u001b[?12l\u001b[?25h" - ], - [ - 0.065749, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36ma using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m9\u001b[47;113H\u001b[?12l\u001b[?25h" - ], - [ - 0.067013, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mr using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;209H\u001b[38;5;22m\u001b[48;5;117m10\u001b[47;114H\u001b[?12l\u001b[?25h" - ], - [ - 0.082416, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36me using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m1\u001b[47;115H\u001b[?12l\u001b[?25h" - ], - [ - 0.391867, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K" - ], - [ - 0.002802, - "\u001b[51;1H\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;31m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[m\u001b[93m\u001b[107m\u001b[51;40H\u001b[38;5;240m\u001b[48;5;236m\u001b[51;41H 
\u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                                     \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;181H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;189H\u001b[38;5;247m" - ], - [ - 4.4e-05, - "\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;194H\u001b[38;5;107m\u001b[48;5;240m  54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;201H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;203H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;252m:110 \u001b[47;114H\u001b[?12l\u001b[?25h" - ], - [ - 0.330025, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[47;46H\u001b[1m\u001b[31m\u001b[106m(\u001b[106C)\u001b[m\u001b[93m\u001b[107m\u001b[51;209H\u001b[38;5;22m\u001b[48;5;252m49\u001b[47;153H\u001b[?12l\u001b[?25h" - ], - [ - 0.483853, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[34m-- INSERT --" - ], - [ - 0.009014, - "\u001b[m\u001b[93m\u001b[107m\u001b[51;1H\u001b[1m\u001b[38;5;23m\u001b[48;5;231m INSERT \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;231m\u001b[48;5;31m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;31m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;31mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[38;5;220m\u001b[48;5;31m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[m\u001b[93m\u001b[107m\u001b[51;40H\u001b[38;5;31m\u001b[48;5;24m\u001b[51;41H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;24m                                                                                                                                     \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;181H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;189H\u001b[38;5;11" - ], - [ - 8.4e-05, - "7m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;194H\u001b[38;5;186m\u001b[48;5;31m  54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[51;201H\u001b[38;5;24m\u001b[48;5;117m \u001b[51;203H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;117m:149 \u001b[47;153H\u001b[?12l\u001b[?25h" - ], - [ - 0.255014, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b)\u001b[47;153H\u001b[K\u001b[47;152H\u001b[1m\u001b[31m\u001b[106m)\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[47;152H\u001b[?12l\u001b[?25h" - ], - [ - 0.143393, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b)\u001b[47;152H\u001b[K\u001b[47;151H\u001b[1m\u001b[31m\u001b[106m)\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[47;151H\u001b[?12l\u001b[?25h" - ], - [ - 0.173726, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b)\u001b[47;151H\u001b[K\u001b[47;150H\u001b[1m\u001b[31m\u001b[106m)\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[47;150H\u001b[?12l\u001b[?25h" - ], - [ - 0.151736, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b)\u001b[47;150H\u001b[K\u001b[47;149H\u001b[1m\u001b[31m\u001b[106m)\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[47;149H\u001b[?12l\u001b[?25h" - ], - [ - 0.17734, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b)\u001b[47;149H\u001b[K\u001b[47;148H\u001b[1m\u001b[31m\u001b[106m)\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m4\u001b[47;148H\u001b[?12l\u001b[?25h" - ], - [ - 0.158422, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b)\u001b[47;148H\u001b[K\u001b[47;147H\u001b[1m\u001b[31m\u001b[106m)\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[47;147H\u001b[?12l\u001b[?25h" - ], - [ - 0.144092, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b)\u001b[47;147H\u001b[K\u001b[47;146H\u001b[1m\u001b[31m\u001b[106m)\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[47;146H\u001b[?12l\u001b[?25h" - ], - [ - 0.211601, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K" - ], - [ - 0.009089, - "\u001b[47;46H(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[98C)\u001b[51;1H\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;31m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[m\u001b[93m\u001b[107m\u001b[51;40H\u001b[38;5;240m\u001b[48;5;236m\u001b[51;41H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                                     \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;181H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m " - ], - [ - 3.1e-05, - "\u001b[m\u001b[93m\u001b[107m\u001b[51;189H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;194H\u001b[38;5;107m\u001b[48;5;240m  54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;201H\u001b[38;5;235m\u001b[48;5;252m 
\u001b[51;203H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;252m:141 \u001b[47;145H\u001b[?12l\u001b[?25h" - ], - [ - 0.214474, - "\u001b[?25l\u001b[52;1H\u001b[m\u001b[93m\u001b[107m:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.143718, - "w\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.056661, - "q" - ], - [ - 7.6e-05, - "\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.071657, - "\r" - ], - [ - 7.3e-05, - "\u001b[?25l\u001b[?2004l" - ], - [ - 0.023973, - "\"cmd/kpod/rmi.go\"" - ], - [ - 0.003962, - " 123L, 3100C written" - ], - [ - 0.012358, - "\r\r\r\n\u001b[39;49m\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.001868, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.016767, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m13s\u001b[39m\r\n" - ], - [ - 0.001258, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 5.2e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.6e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 7e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 5.8e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.4e-05, - "\u001b[?2004h" - ], - [ - 2.403767, - "s" - ], - [ - 0.072404, - "\bsu" - ], - [ - 0.087776, - "d" - ], - [ - 0.10411, - "o" - ], - [ - 0.103535, - " " - ], - [ - 0.112565, - "m" - ], - [ - 0.103814, - "a" - ], - [ - 0.127747, - "k" - ], - [ - 0.104285, - "e" - ], - [ - 0.055181, - " " - ], - [ - 0.096552, - "k" - ], - [ - 0.695888, - "\b \b" - ], - [ - 0.499894, - "\b" - ], - [ - 0.031116, - "\b \b" - ], - [ - 0.031334, - "\b \b" - ], - [ - 0.03129, - "\b \b" - ], - [ - 0.030503, - "\b \b" - ], - [ - 0.029752, - "\b" - ], - [ - 0.03046, - "\b \b" - ], - [ - 0.029619, - "\b \b" - ], - [ - 0.031117, - "\b\bs \b" - ], - [ - 0.030333, - "\b \b" - ], - [ - 0.143801, - "m" - ], - [ - 0.137018, - "\bma" - ], - [ - 0.127748, - "k" - ], - [ - 0.071257, - "e" - ], - [ - 0.112655, - " " - ], - [ - 0.103705, - "k" - ], - [ - 0.120423, - "p" - ], - [ - 0.088219, - "o" - ], - [ - 0.151337, - "d" - ], - [ - 0.144145, - "\u001b[?1l\u001b>" - ], - [ - 0.000245, - "\u001b[?2004l\r\r\n" - ], - [ - 0.003789, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 6.807666, - "go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502976873' -tags \"selinux seccomp \" -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod\r\n" - ], - [ - 2.098933, - "# github.com/kubernetes-incubator/cri-o/cmd/kpod\r\ncmd/kpod/rmi.go:65: ctrID declared and not used\r\n" - ], - [ - 0.002582, - "make: *** [Makefile:83: kpod] Error 2\r\n" - ], - [ - 0.000308, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.020816, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m9s\u001b[39m\r\n" - ], - [ - 0.001294, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000209, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007" - ], - [ - 3e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m " - ], - [ - 2.1e-05, - 
"\u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000157, - "\u001b[?1h\u001b=" - ], - [ - 3.1e-05, - "\u001b[?2004h" - ], - [ - 2.964102, - "v" - ], - [ - 0.136044, - "\bvu" - ], - [ - 0.095982, - " " - ], - [ - 0.088003, - "c" - ], - [ - 0.08868, - "n" - ], - [ - 0.154713, - "\u0007" - ], - [ - 0.10947, - "j" - ], - [ - 0.161602, - "\u0007" - ], - [ - 0.173494, - "\b \b" - ], - [ - 0.151719, - "\b \b" - ], - [ - 0.609085, - "m" - ], - [ - 0.167394, - "d\u001b[1m/\u001b[0m" - ], - [ - 0.840479, - "\b\u001b[0m/k" - ], - [ - 0.206026, - "pod\u001b[1m/\u001b[0m" - ], - [ - 0.257517, - "\b\u001b[0m/r" - ], - [ - 0.127635, - "m" - ], - [ - 0.103364, - "i.go\u001b[1m \u001b[0m" - ], - [ - 0.616305, - "\b\u001b[0m \b" - ], - [ - 0.000215, - "\u001b[?1l\u001b>" - ], - [ - 0.000236, - "\u001b[?2004l\r\r\n" - ], - [ - 0.004049, - "\u001b]2;vu cmd/kpod/rmi.go\u0007\u001b]1;vu\u0007" - ], - [ - 0.00938, - "zsh: vu: command not found..." - ], - [ - 6.4e-05, - "\r\n" - ], - [ - 0.171147, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.017238, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001466, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.00011, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.7e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 8e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.3e-05, - "\u001b[?1h\u001b=" - ], - [ - 3.2e-05, - "\u001b[?2004h" - ], - [ - 0.476196, - "6" - ], - [ - 0.080551, - "\b65" - ], - [ - 0.255454, - "G" - ], - [ - 0.432225, - "\b \b" - ], - [ - 0.159879, - "\b\b6 \b" - ], - [ - 0.1445, - "\b \b" - ], - [ - 0.384217, - "vu cmd/kpod/rmi.go" - ], - [ - 0.895489, - "\u001b[18D" - ], - [ - 0.176977, - "\u001b[1C" - ], - [ - 0.166725, - "\u001b[1C" - ], - [ - 0.256332, - "\b\bv cmd/kpod/rmi.go \u001b[17D" - ], - [ - 0.520213, - "\bvi cmd/kpod/rmi.go\u001b[16D" - ], - [ - 0.223669, - "\u001b[?1l\u001b>" - ], - [ - 0.000151, - "\u001b[?2004l\r\r\n" - ], - [ - 0.00446, - "\u001b]2;vim cmd/kpod/rmi.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.138232, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000485, - "\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"cmd/kpod/rmi.go\"" - ], - [ - 0.000106, - " 123L, 3100C" - ], - [ - 0.008045, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.002005, - "\u001b[1;1H\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m rmiCommand = cli.Command{\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[8CName:\u001b[8C\u001b[36m\"rmi\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[8CUsage:\u001b[7C\u001b[36m\"removes one or more images from local storage\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[8CDescription: rmiDescription,\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8CAction: rmiCmd,\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m\u001b[8CArgsUsage: \u001b[36m\"IMAGE-NAME-OR-ID [...]\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m\u001b[8CFlags:\u001b[7CrmiFlags,\r\n\u001b[96m\u001b[47m 27 
\u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 28 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 29 \r\n 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m rmiCmd(c *cli.Context) \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 31 \r\n 32 \u001b[m\u001b[93m\u001b[107m force := \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m c.IsSet(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m\u001b[8Cforce = c.Bool(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 35 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 36 \r\n 37 \u001b[m\u001b[93m\u001b[107m args := c.Args()\r\n\u001b[96m\u001b[47m 3" - ], - [ - 1.9e-05, - "8 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(args) == \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Errorf(\u001b[36m\"image name or ID must be specified\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 41 \r\n 42 \u001b[m\u001b[93m\u001b[107m config, err := getConfig(c)\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Could not get config\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m store, err := getStore(config)\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 50 \r\n 51 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m _, id := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m args {\r\n\u001b[96m\u001b[47m 52 \u001b[m\u001b[93m\u001b[107m\u001b[8Cimage, err := libkpodimage.FindImage(store, id)\r\n" - ], - [ - 0.031049, - "\u001b[96m\u001b[47m 53 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 54 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"could not get image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 55 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 56 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m image != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 57 \u001b[m\u001b[93m\u001b[107m\u001b[12CctrIDs, err := runningContainers(image, store)\r\n\u001b[96m\u001b[47m 58 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 59 
\u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"error getting running containers for image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 60 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 61 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(ctrIDs) > \u001b[36m0\u001b[m\u001b[93m\u001b[107m && \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(image.Names) <= \u001b[36m1\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 62 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m force {\r\n\u001b[96m\u001b[47m 63 \u001b[" - ], - [ - 0.000197, - "m\u001b[93m\u001b[107m\u001b[20CremoveContainers(ctrIDs, store)\r\n\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m\u001b[16C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mfor\u001b[m\u001b[93m\u001b[107m ctrID := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\r\n\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m\u001b[24C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m fmt.Errorf(\u001b[36m\"Could not remove image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m (must force) - one or more containers are using its reference image\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m\u001b[16C}\r\n\u001b[96m\u001b[47m 69 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;220m\u001b[48;5;240m M \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;41H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                        " - ], - [ - 0.009393, - "                                                                                                              \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:25 \u001b[47;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.000186, - 
"\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 0.894574, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;30H\u001b[1m\u001b[31m\u001b[106m{\u001b[45;21H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  52%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[45;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.755059, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;30H{\u001b[45;21H} \u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:20\u001b[46;24H\u001b[?12l\u001b[?25h" - ], - [ - 0.297693, - "\u001b[?25l\u001b[51;210H1\u001b[46;25H\u001b[?12l\u001b[?25h" - ], - [ - 0.494594, - "\u001b[51;210H2\u001b[46;26H" - ], - [ - 0.029144, - "\u001b[51;210H3\u001b[46;27H" - ], - [ - 0.189856, - "\u001b[51;210H4\u001b[46;28H" - ], - [ - 0.503162, - "\u001b[51;210H5\u001b[46;29H" - ], - [ - 0.02688, - "\u001b[51;210H6\u001b[46;30H" - ], - [ - 0.031882, - "\u001b[51;210H7\u001b[46;31H" - ], - [ - 0.031015, - "\u001b[51;210H8\u001b[46;32H" - ], - [ - 0.028268, - "\u001b[51;210H9\u001b[46;33H" - ], - [ - 0.725696, - "\u001b[51;209H30\u001b[46;34H" - ], - [ - 5.869681, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[34m-- INSERT --\u001b[m\u001b[93m\u001b[107m\u001b[52;13H\u001b[K" - ], - [ - 0.040358, - "\u001b[51;1H\u001b[1m\u001b[38;5;23m\u001b[48;5;231m INSERT \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;231m\u001b[48;5;31m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;31m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;31mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;220m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;220m\u001b[48;5;31m M \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[51;41H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;24m                                                                                                                                      \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;117m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[10" - ], - [ - 2.4e-05, - "7m\u001b[51;195H\u001b[38;5;186m\u001b[48;5;31m  53%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;24m\u001b[48;5;117m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 65\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;117m:30 \u001b[46;34H\u001b[?12l\u001b[?25h" - ], - [ - 0.274464, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b := 
\u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[46;51H\u001b[K\u001b[51;39H\u001b[1m\u001b[38;5;220m\u001b[48;5;31m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[51;43H \u001b[m\u001b[93m\u001b[107m\u001b[165C\u001b[38;5;22m\u001b[48;5;117m29\u001b[46;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.151925, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[46;50H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[46;32H\u001b[?12l\u001b[?25h" - ], - [ - 0.167731, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[46;49H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[46;31H\u001b[?12l\u001b[?25h" - ], - [ - 0.184041, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[46;48H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[46;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.156407, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[46;47H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[46;29H\u001b[?12l\u001b[?25h" - ], - [ - 1.347077, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m_ := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[46;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.334595, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K" - ], - [ - 0.014443, - "\u001b[51;1H\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;31m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;220m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;220m\u001b[48;5;240m M\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[m\u001b[93m\u001b[107m\u001b[51;42H\u001b[38;5;240m\u001b[48;5;236m\u001b[51;43H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                                    \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b" - ], - [ - 2.9e-05, - "[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 65\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;252m:25 \u001b[46;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.257293, - 
"\u001b[?25l\u001b[52;1H\u001b[m\u001b[93m\u001b[107m:\u001b[?2004h" - ], - [ - 8.6e-05, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.135176, - "w\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.03633, - "q" - ], - [ - 0.000156, - "\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.124203, - "\r" - ], - [ - 0.0003, - "\u001b[?25l\u001b[?2004l" - ], - [ - 0.023972, - "\"cmd/kpod/rmi.go\"" - ], - [ - 0.012083, - " 123L, 3096C written" - ], - [ - 0.013278, - "\r\r\r\n\u001b[39;49m\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.002006, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.025275, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m13s\u001b[39m\r\n" - ], - [ - 0.001052, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 8.3e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 6.3e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 7e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.4e-05, - "\u001b[?1h\u001b=" - ], - [ - 2e-05, - "\u001b[?2004h" - ], - [ - 0.122123, - "vi cmd/kpod/rmi.go" - ], - [ - 0.183865, - "\u001b[18Dvu\u001b[16C" - ], - [ - 0.8524, - "\u001b[18Dvi\u001b[16C" - ], - [ - 0.251769, - "\u001b[18D \u001b[18D" - ], - [ - 0.427799, - "vi cmd/kpod/rmi.go" - ], - [ - 0.996091, - "\u001b[?1l\u001b>" - ], - [ - 0.000117, - "\u001b[?2004l\r\r\n" - ], - [ - 0.004829, - "\u001b]2;vim cmd/kpod/rmi.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.136844, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000622, - "\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"cmd/kpod/rmi.go\"" - ], - [ - 7.1e-05, - " 123L, 3096C" - ], - [ - 0.008554, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.002929, - "\u001b[1;1H\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m rmiCommand = cli.Command{\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[8CName:\u001b[8C\u001b[36m\"rmi\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[8CUsage:\u001b[7C\u001b[36m\"removes one or more images from local storage\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[8CDescription: rmiDescription,\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8CAction: rmiCmd,\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m\u001b[8CArgsUsage: \u001b[36m\"IMAGE-NAME-OR-ID [...]\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m\u001b[8CFlags:\u001b[7CrmiFlags,\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 28 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 29 \r\n 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m rmiCmd(c *cli.Context) \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 31 \r\n 32 \u001b[m\u001b[93m\u001b[107m force := \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m c.IsSet(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 34 
\u001b[m\u001b[93m\u001b[107m\u001b[8Cforce = c.Bool(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 35 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 36 \r\n 37 \u001b[m\u001b[93m\u001b[107" - ], - [ - 1.9e-05, - "m args := c.Args()\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(args) == \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Errorf(\u001b[36m\"image name or ID must be specified\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 41 \r\n 42 \u001b[m\u001b[93m\u001b[107m config, err := getConfig(c)\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Could not get config\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m store, err := getStore(config)\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 50 \r\n 51 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m _, id := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m args {\r\n\u001b[96m\u001b[47m 52 \u001b[m\u001b[93m\u001b[107m\u001b[8Cimage, err := libkpodimage.FindI" - ], - [ - 0.030095, - "mage(store, id)\r\n\u001b[96m\u001b[47m 53 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 54 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"could not get image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 55 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 56 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m image != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 57 \u001b[m\u001b[93m\u001b[107m\u001b[12CctrIDs, err := runningContainers(image, store)\r\n\u001b[96m\u001b[47m 58 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 59 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"error getting running containers for image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 60 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 61 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(ctrIDs) > \u001b[36m0\u001b[m\u001b[93m\u001b[107m && \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(image.Names) <= 
\u001b[36m1\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 62 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m force {\r" - ], - [ - 3.7e-05, - "\n\u001b[96m\u001b[47m 63 \u001b[m\u001b[93m\u001b[107m\u001b[20CremoveContainers(ctrIDs, store)\r\n\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m\u001b[16C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mfor\u001b[m\u001b[93m\u001b[107m _ := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\r\n\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m\u001b[24C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m fmt.Errorf(\u001b[36m\"Could not remove image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m (must force) - one or more containers are using its reference image\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m\u001b[16C}\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;220m\u001b[48;5;240m M \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;41H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                    " - ], - [ - 0.011253, - "                                                                                                  \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 65\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:21 \u001b[47;25H\u001b[?12l\u001b[?25h\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 0.884291, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K\u001b[52;1H:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.175395, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.047934, - "\r" - ], - [ - 0.019314, - "\u001b[?25l\u001b[?2004l\u001b[52;1H\u001b[K\u001b[52;1H\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.001837, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.019156, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001205, - 
"\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000106, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.3e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 9.5e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.1e-05, - "\u001b[?2004h" - ], - [ - 0.22274, - "m" - ], - [ - 0.120257, - "\bma" - ], - [ - 0.087838, - "k" - ], - [ - 0.119717, - "e" - ], - [ - 0.055994, - " " - ], - [ - 0.103706, - "k" - ], - [ - 0.080698, - "p" - ], - [ - 0.051603, - "o" - ], - [ - 0.124175, - "d" - ], - [ - 0.076543, - "\u001b[?1l\u001b>\u001b[?2004l\r\r\n" - ], - [ - 0.004916, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 6.744424, - "go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502976912' -tags \"selinux seccomp \" -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod\r\n" - ], - [ - 1.99486, - "# github.com/kubernetes-incubator/cri-o/cmd/kpod\r\ncmd/kpod/rmi.go:65: no new variables on left side of :=\r\n" - ], - [ - 0.002634, - "make: *** [Makefile:83: kpod] Error 2\r\n" - ], - [ - 0.000711, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.019707, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m9s\u001b[39m\r\n" - ], - [ - 0.001352, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000124, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000112, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 4.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.8e-05, - "\u001b[?2004h" - ], - [ - 12.559376, - "make kpod" - ], - [ - 0.160079, - "\u001b[9Dvi cmd/kpod/rmi.go" - ], - [ - 0.376119, - "\\" - ], - [ - 0.719352, - "\u001b[?1l\u001b>" - ], - [ - 0.000252, - "\u001b[?2004l\r\r\n" - ], - [ - 0.000889, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J> \u001b[K" - ], - [ - 0.000257, - "\u001b[?1h\u001b=" - ], - [ - 9e-05, - "\u001b[?2004h" - ], - [ - 0.814396, - "\u001b[?2004l\r\r\n" - ], - [ - 0.002088, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.020752, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m24s\u001b[39m\r\n" - ], - [ - 0.001027, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000116, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.6e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000109, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.1e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.9e-05, - "\u001b[?2004h" - ], - [ - 0.191891, - "vi cmd/kpod/rmi.go\\" - ], - [ - 0.255869, - "\b \b" - ], - [ - 0.164227, - "\u001b[?1l\u001b>" - ], - [ - 0.000395, - "\u001b[?2004l\r\r\n" - ], - [ - 0.005085, - "\u001b]2;vim cmd/kpod/rmi.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.139286, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000664, - 
"\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"cmd/kpod/rmi.go\" 123L, 3096C" - ], - [ - 0.007849, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.001971, - "\u001b[1;1H\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m rmiCommand = cli.Command{\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[8CName:\u001b[8C\u001b[36m\"rmi\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[8CUsage:\u001b[7C\u001b[36m\"removes one or more images from local storage\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[8CDescription: rmiDescription,\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8CAction: rmiCmd,\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m\u001b[8CArgsUsage: \u001b[36m\"IMAGE-NAME-OR-ID [...]\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m\u001b[8CFlags:\u001b[7CrmiFlags,\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 28 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 29 \r\n 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m rmiCmd(c *cli.Context) \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 31 \r\n 32 \u001b[m\u001b[93m\u001b[107m force := \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m c.IsSet(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m\u001b[8Cforce = c.Bool(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 35 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 36 \r\n 37 \u001b[m\u001b[93m\u001b[107" - ], - [ - 2e-05, - "m args := c.Args()\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(args) == \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Errorf(\u001b[36m\"image name or ID must be specified\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 41 \r\n 42 \u001b[m\u001b[93m\u001b[107m config, err := getConfig(c)\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Could not get config\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m store, err := getStore(config)\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 50 \r\n 51 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m _, id := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m args 
{\r\n\u001b[96m\u001b[47m 52 \u001b[m\u001b[93m\u001b[107m\u001b[8Cimage, err := libkpodimage.FindI" - ], - [ - 0.030785, - "mage(store, id)\r\n\u001b[96m\u001b[47m 53 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 54 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"could not get image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 55 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 56 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m image != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 57 \u001b[m\u001b[93m\u001b[107m\u001b[12CctrIDs, err := runningContainers(image, store)\r\n\u001b[96m\u001b[47m 58 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 59 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"error getting running containers for image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 60 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 61 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(ctrIDs) > \u001b[36m0\u001b[m\u001b[93m\u001b[107m && \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(image.Names) <= \u001b[36m1\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 62 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m force {\r" - ], - [ - 3e-05, - "\n\u001b[96m\u001b[47m 63 \u001b[m\u001b[93m\u001b[107m\u001b[20CremoveContainers(ctrIDs, store)\r\n\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m\u001b[16C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mfor\u001b[m\u001b[93m\u001b[107m _ := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\r\n\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m\u001b[24C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m fmt.Errorf(\u001b[36m\"Could not remove image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m (must force) - one or more containers are using its reference image\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m\u001b[16C}\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;220m\u001b[48;5;240m M \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;41H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                    " - ], - [ - 0.009791, - "                                                                                                  
\u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 65\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:21 \u001b[47;25H\u001b[?12l\u001b[?25h" - ], - [ - 3.2e-05, - "\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 0.549454, - "\u001b[?25l\u001b[51;210H2\u001b[47;26H\u001b[?12l\u001b[?25h" - ], - [ - 0.282227, - "\u001b[51;210H3\u001b[47;27H" - ], - [ - 0.500981, - "\u001b[51;210H4\u001b[47;28H" - ], - [ - 0.028181, - "\u001b[51;210H5\u001b[47;29H" - ], - [ - 0.120266, - "\u001b[51;210H6\u001b[47;30H" - ], - [ - 0.176334, - "\u001b[51;210H7\u001b[47;31H" - ], - [ - 0.332685, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m= \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[47;47H\u001b[K\u001b[51;39H\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;43H \u001b[47;31H\u001b[?12l\u001b[?25h" - ], - [ - 0.457865, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K\u001b[52;1H:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.16749, - "w\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.072064, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.096121, - "\r" - ], - [ - 0.000123, - "\u001b[?25l\u001b[?2004l" - ], - [ - 0.01842, - "\"cmd/kpod/rmi.go\"" - ], - [ - 0.012505, - " 123L, 3095C written" - ], - [ - 0.016238, - "\r\r\r\n\u001b[39;49m\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.002512, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.022671, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001565, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000124, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 3e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000106, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.2e-05, - "\u001b[?1h\u001b=" - ], - [ - 3.7e-05, - "\u001b[?2004h" - ], - [ - 0.334146, - "vi cmd/kpod/rmi.go" - ], - [ - 0.182947, - "\\" - ], - [ - 0.599976, - "\u001b[?1l\u001b>" - ], - [ - 5.2e-05, - "\u001b[?2004l\r\r\n" - ], - [ - 0.000542, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J> \u001b[K" - ], - [ - 0.000169, - "\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 1.143722, - "\u001b[?2004l\r\r\n" - ], - [ - 0.001295, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.028119, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m6s\u001b[39m\r\n" - ], - [ - 0.00136, - 
"\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000114, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.7e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000126, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.9e-05, - "\u001b[?1h\u001b=" - ], - [ - 3.2e-05, - "\u001b[?2004h" - ], - [ - 0.193141, - "vi cmd/kpod/rmi.go\\" - ], - [ - 0.159626, - "\b \b" - ], - [ - 0.18457, - "\\" - ], - [ - 0.29669, - "\u001b[19Dmake kpod \u001b[10D" - ], - [ - 0.863034, - "\u001b[?1l\u001b>" - ], - [ - 0.000232, - "\u001b[?2004l\r\r\n" - ], - [ - 0.004368, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 6.733531, - "go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502976943' -tags \"selinux seccomp \" -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod\r\n" - ], - [ - 4.55149, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.018009, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m12s\u001b[39m\r\n" - ], - [ - 0.001122, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000105, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007" - ], - [ - 8.3e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.1e-05, - "\u001b[?1h\u001b=" - ], - [ - 2e-05, - "\u001b[?2004h" - ], - [ - 20.811668, - "m" - ], - [ - 0.119573, - "\bma" - ], - [ - 0.111827, - "k" - ], - [ - 0.080357, - "e" - ], - [ - 0.103149, - " " - ], - [ - 0.084516, - "k" - ], - [ - 0.143968, - "p" - ], - [ - 0.079279, - "o" - ], - [ - 0.088902, - "d" - ], - [ - 0.135804, - "\u001b[?1l\u001b>" - ], - [ - 0.000119, - "\u001b[?2004l" - ], - [ - 0.000522, - "\r\r\n" - ], - [ - 0.003588, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 6.696294, - "make: 'kpod' is up to date.\r\n" - ], - [ - 0.000283, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.020139, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m7s\u001b[39m\r\n" - ], - [ - 0.001161, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 9e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.8e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000108, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.5e-05, - "\u001b[?1h\u001b=" - ], - [ - 7.7e-05, - "\u001b[?2004h" - ], - [ - 243.314771, - "s" - ], - [ - 0.111394, - "\bsu" - ], - [ - 0.096183, - "d" - ], - [ - 0.080041, - "o" - ], - [ - 0.128398, - " " - ], - [ - 0.119735, - "m" - ], - [ - 0.095922, - "a" - ], - [ - 0.088141, - "k" - ], - [ - 0.072233, - "e" - ], - [ - 0.095742, - " " - ], - [ - 0.087941, - "i" - ], - [ - 0.104278, - "n" - ], - [ - 0.032039, - "s" - ], - [ - 0.118983, - "t" - ], - [ - 0.072644, - "a" - ], - [ - 0.143994, - "l" - ], - [ - 0.168065, - "l" - ], - [ - 0.752715, - "\u001b[?1l\u001b>\u001b[?2004l\r\r\n" - ], - [ - 0.009834, - "\u001b]2;sudo make install\u0007\u001b]1;make\u0007" - ], - [ - 0.955201, - "[sudo] 
password for ryan: " - ], - [ - 20.245864, - "\r\n" - ], - [ - 0.045577, - "mkdir -p \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/_output/src/github.com/kubernetes-incubator\"\r\n" - ], - [ - 0.001888, - "ln -s \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\" \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/_output/src/github.com/kubernetes-incubator\"\r\n" - ], - [ - 0.001297, - "touch \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/_output/.gopathok\"\r\n" - ], - [ - 0.001197, - "install -D -m 755 crio /usr/local/bin/crio\r\n" - ], - [ - 0.000745, - "install: cannot stat 'crio': No such file or directory\r\n" - ], - [ - 0.000258, - "make: *** [Makefile:133: install] Error 1\r\n" - ], - [ - 0.002376, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.025148, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m21s\u001b[39m\r\n" - ], - [ - 0.001447, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000116, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 3.1e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000295, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 8.2e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.9e-05, - "\u001b[?2004h" - ], - [ - 16.688284, - "s" - ], - [ - 0.136222, - "\bsu" - ], - [ - 0.119402, - "d" - ], - [ - 0.080011, - "o" - ], - [ - 0.135772, - " " - ], - [ - 0.088659, - "m" - ], - [ - 0.112281, - "a" - ], - [ - 0.118598, - "k" - ], - [ - 0.120387, - "e" - ], - [ - 0.104104, - " " - ], - [ - 0.248577, - "u" - ], - [ - 0.215792, - "n" - ], - [ - 0.140158, - "i" - ], - [ - 0.067519, - "n" - ], - [ - 0.071505, - "s" - ], - [ - 0.104231, - "t" - ], - [ - 0.08841, - "a" - ], - [ - 0.120753, - "l" - ], - [ - 0.111041, - "l" - ], - [ - 0.188244, - "\u001b[?1l\u001b>" - ], - [ - 0.000191, - "\u001b[?2004l\r\r\n" - ], - [ - 0.008904, - "\u001b]2;sudo make uninstall\u0007\u001b]1;make\u0007" - ], - [ - 2.992644, - "rm -f /usr/local/bin/crio\r\n" - ], - [ - 0.008835, - "rm -f /usr/local/bin/crioctl\r\n" - ], - [ - 0.006105, - "rm -f /usr/local/libexec/crio/conmon\r\n" - ], - [ - 0.001162, - "rm -f /usr/local/libexec/crio/pause" - ], - [ - 0.000104, - "\r\n" - ], - [ - 0.000807, - "for i in docs/kpod-diff.1 docs/kpod-push.1 docs/kpod.1 docs/kpod-export.1 docs/kpod-load.1 docs/kpod-images.1 docs/kpod-umount.1 docs/kpod-save.1 docs/kpod-cp.1 docs/kpod-info.1 docs/kpod-mount.1 docs/kpod-inspect.1 docs/kpod-logs.1 docs/kpod-history.1 docs/kpod-pull.1 docs/kpod-rmi.1 docs/kpod-version.1 docs/kpod-tag.1; do \\\r\n\trm -f /usr/local/share/man/man8/$(basename ${i}); \\\r\ndone\r\n" - ], - [ - 0.038238, - "for i in docs/crio.conf.5; do \\\r\n\trm -f /usr/local/share/man/man5/$(basename ${i}); \\\r\ndone\r\n" - ], - [ - 0.003095, - "for i in docs/crio.8; do \\\r\n\trm -f /usr/local/share/man/man8/$(basename ${i}); \\\r\ndone\r\n" - ], - [ - 0.006486, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.024997, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001214, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 8.9e-05, - "\u001b]2;ryan@localhost: 
~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.5e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 8e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 8e-05, - "\u001b[?1h\u001b=" - ], - [ - 3.1e-05, - "\u001b[?2004h" - ], - [ - 0.734872, - "m" - ], - [ - 0.09234, - "\bma" - ], - [ - 0.115606, - "k" - ], - [ - 0.060071, - "e" - ], - [ - 0.080073, - " " - ], - [ - 0.10406, - "a" - ], - [ - 0.055694, - "l" - ], - [ - 0.108501, - "l" - ], - [ - 0.199594, - "\u001b[?1l\u001b>" - ], - [ - 6.5e-05, - "\u001b[?2004l\r\r\n" - ], - [ - 0.00357, - "\u001b]2;make all\u0007" - ], - [ - 7.2e-05, - "\u001b]1;make\u0007" - ], - [ - 7.412593, - "go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502977274' -tags \"selinux seccomp \" -o crio github.com/kubernetes-incubator/cri-o/cmd/crio\r\n" - ], - [ - 6.928493, - "go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502977274' -tags \"selinux seccomp \" -o crioctl github.com/kubernetes-incubator/cri-o/cmd/crioctl\r\n" - ], - [ - 1.93333, - "make -C conmon\r\n" - ], - [ - 0.004362, - "make[1]: Entering directory '/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/conmon'\r\n" - ], - [ - 0.000772, - "cc -std=c99 -Os -Wall -Wextra -I/usr/include/glib-2.0 -I/usr/lib64/glib-2.0/include -c -o conmon.o conmon.c\r\n" - ], - [ - 0.314835, - "cc -std=c99 -Os -Wall -Wextra -I/usr/include/glib-2.0 -I/usr/lib64/glib-2.0/include -c -o cmsg.o cmsg.c\r\n" - ], - [ - 0.040765, - "cc -o conmon conmon.o cmsg.o -std=c99 -Os -Wall -Wextra -I/usr/include/glib-2.0 -I/usr/lib64/glib-2.0/include -lglib-2.0 \r\n" - ], - [ - 0.01855, - "make[1]: Leaving directory '/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/conmon'\r\n" - ], - [ - 0.000285, - "make -C pause\r\n" - ], - [ - 0.00317, - "make[1]: Entering directory '/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/pause'\r\ncc -std=c99 -Os -Wall -Wextra -static -c -o pause.o pause.c\r\n" - ], - [ - 0.030319, - "cc -o pause pause.o -std=c99 -Os -Wall -Wextra -static \r\n" - ], - [ - 0.090802, - "strip pause\r\n" - ], - [ - 0.004286, - "make[1]: Leaving directory '/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/pause'\r\n" - ], - [ - 0.068555, - "go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502977274' -tags \"selinux seccomp \" -o test/bin2img/bin2img github.com/kubernetes-incubator/cri-o/test/bin2img\r\n" - ], - [ - 2.210776, - "go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502977274' -tags \"selinux seccomp \" -o test/copyimg/copyimg github.com/kubernetes-incubator/cri-o/test/copyimg\r\n" - ], - [ - 2.412996, - "go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502977274' -tags \"selinux seccomp \" -o test/checkseccomp/checkseccomp github.com/kubernetes-incubator/cri-o/test/checkseccomp" - ], - [ - 6.7e-05, - "\r\n" - ], - [ - 0.162664, - "./crio --config=\"\" config --default > crio.conf\r\n" - ], - [ - 0.091642, - "(go-md2man -in docs/kpod-diff.1.md -out docs/kpod-diff.1.tmp && touch docs/kpod-diff.1.tmp && mv docs/kpod-diff.1.tmp docs/kpod-diff.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-diff.1.md -out docs/kpod-diff.1.tmp && touch docs/kpod-diff.1.tmp && mv docs/kpod-diff.1.tmp docs/kpod-diff.1)\r\n" - ], - [ - 0.006608, - "(go-md2man -in docs/kpod-push.1.md -out docs/kpod-push.1.tmp && touch docs/kpod-push.1.tmp && mv docs/kpod-push.1.tmp 
docs/kpod-push.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-push.1.md -out docs/kpod-push.1.tmp && touch docs/kpod-push.1.tmp && mv docs/kpod-push.1.tmp docs/kpod-push.1)\r\n" - ], - [ - 0.008204, - "(go-md2man -in docs/crio.conf.5.md -out docs/crio.conf.5.tmp && touch docs/crio.conf.5.tmp && mv docs/crio.conf.5.tmp docs/crio.conf.5) || (/home/ryan/Development/Go/bin/go-md2man -in docs/crio.conf.5.md -out docs/crio.conf.5.tmp && touch docs/crio.conf.5.tmp && mv docs/crio.conf.5.tmp docs/crio.conf.5)\r\n" - ], - [ - 0.00881, - "(go-md2man -in docs/kpod.1.md -out docs/kpod.1.tmp && touch docs/kpod.1.tmp && mv docs/kpod.1.tmp docs/kpod.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod.1.md -out docs/kpod.1.tmp && touch docs/kpod.1.tmp && mv docs/kpod.1.tmp docs/kpod.1)\r\n" - ], - [ - 0.008192, - "(go-md2man -in docs/kpod-export.1.md -out docs/kpod-export.1.tmp && touch docs/kpod-export.1.tmp && mv docs/kpod-export.1.tmp docs/kpod-export.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-export.1.md -out docs/kpod-export.1.tmp && touch docs/kpod-export.1.tmp && mv docs/kpod-export.1.tmp docs/kpod-export.1)\r\n" - ], - [ - 0.0109, - "(go-md2man -in docs/kpod-load.1.md -out docs/kpod-load.1.tmp && touch docs/kpod-load.1.tmp && mv docs/kpod-load.1.tmp docs/kpod-load.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-load.1.md -out docs/kpod-load.1.tmp && touch docs/kpod-load.1.tmp && mv docs/kpod-load.1.tmp docs/kpod-load.1)\r\n" - ], - [ - 0.009389, - "(go-md2man -in docs/kpod-images.1.md -out docs/kpod-images.1.tmp && touch docs/kpod-images.1.tmp && mv docs/kpod-images.1.tmp docs/kpod-images.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-images.1.md -out docs/kpod-images.1.tmp && touch docs/kpod-images.1.tmp && mv docs/kpod-images.1.tmp docs/kpod-images.1)\r\n" - ], - [ - 0.005353, - "(go-md2man -in docs/kpod-umount.1.md -out docs/kpod-umount.1.tmp && touch docs/kpod-umount.1.tmp && mv docs/kpod-umount.1.tmp docs/kpod-umount.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-umount.1.md -out docs/kpod-umount.1.tmp && touch docs/kpod-umount.1.tmp && mv docs/kpod-umount.1.tmp docs/kpod-umount.1)\r\n" - ], - [ - 0.008716, - "(go-md2man -in docs/crio.8.md -out docs/crio.8.tmp && touch docs/crio.8.tmp && mv docs/crio.8.tmp docs/crio.8) || (/home/ryan/Development/Go/bin/go-md2man -in docs/crio.8.md -out docs/crio.8.tmp && touch docs/crio.8.tmp && mv docs/crio.8.tmp docs/crio.8)\r\n" - ], - [ - 0.010697, - "(go-md2man -in docs/kpod-save.1.md -out docs/kpod-save.1.tmp && touch docs/kpod-save.1.tmp && mv docs/kpod-save.1.tmp docs/kpod-save.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-save.1.md -out docs/kpod-save.1.tmp && touch docs/kpod-save.1.tmp && mv docs/kpod-save.1.tmp docs/kpod-save.1)\r\n" - ], - [ - 0.007561, - "(go-md2man -in docs/kpod-cp.1.md -out docs/kpod-cp.1.tmp && touch docs/kpod-cp.1.tmp && mv docs/kpod-cp.1.tmp docs/kpod-cp.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-cp.1.md -out docs/kpod-cp.1.tmp && touch docs/kpod-cp.1.tmp && mv docs/kpod-cp.1.tmp docs/kpod-cp.1)\r\n" - ], - [ - 0.005973, - "(go-md2man -in docs/kpod-info.1.md -out docs/kpod-info.1.tmp && touch docs/kpod-info.1.tmp && mv docs/kpod-info.1.tmp docs/kpod-info.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-info.1.md -out docs/kpod-info.1.tmp && touch docs/kpod-info.1.tmp && mv docs/kpod-info.1.tmp docs/kpod-info.1)\r\n" - ], - [ - 0.005551, - "(go-md2man -in docs/kpod-mount.1.md -out 
docs/kpod-mount.1.tmp && touch docs/kpod-mount.1.tmp && mv docs/kpod-mount.1.tmp docs/kpod-mount.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-mount.1.md -out docs/kpod-mount.1.tmp && touch docs/kpod-mount.1.tmp && mv docs/kpod-mount.1.tmp docs/kpod-mount.1)\r\n" - ], - [ - 0.009121, - "(go-md2man -in docs/kpod-inspect.1.md -out docs/kpod-inspect.1.tmp && touch docs/kpod-inspect.1.tmp && mv docs/kpod-inspect.1.tmp docs/kpod-inspect.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-inspect.1.md -out docs/kpod-inspect.1.tmp && touch docs/kpod-inspect.1.tmp && mv docs/kpod-inspect.1.tmp docs/kpod-inspect.1)\r\n" - ], - [ - 0.006826, - "(go-md2man -in docs/kpod-logs.1.md -out docs/kpod-logs.1.tmp && touch docs/kpod-logs.1.tmp && mv docs/kpod-logs.1.tmp docs/kpod-logs.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-logs.1.md -out docs/kpod-logs.1.tmp && touch docs/kpod-logs.1.tmp && mv docs/kpod-logs.1.tmp docs/kpod-logs.1)\r\n" - ], - [ - 0.007772, - "(go-md2man -in docs/kpod-history.1.md -out docs/kpod-history.1.tmp && touch docs/kpod-history.1.tmp && mv docs/kpod-history.1.tmp docs/kpod-history.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-history.1.md -out docs/kpod-history.1.tmp && touch docs/kpod-history.1.tmp && mv docs/kpod-history.1.tmp docs/kpod-history.1)\r\n" - ], - [ - 0.005003, - "(go-md2man -in docs/kpod-pull.1.md -out docs/kpod-pull.1.tmp && touch docs/kpod-pull.1.tmp && mv docs/kpod-pull.1.tmp docs/kpod-pull.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-pull.1.md -out docs/kpod-pull.1.tmp && touch docs/kpod-pull.1.tmp && mv docs/kpod-pull.1.tmp docs/kpod-pull.1)\r\n" - ], - [ - 0.010404, - "(go-md2man -in docs/kpod-rmi.1.md -out docs/kpod-rmi.1.tmp && touch docs/kpod-rmi.1.tmp && mv docs/kpod-rmi.1.tmp docs/kpod-rmi.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-rmi.1.md -out docs/kpod-rmi.1.tmp && touch docs/kpod-rmi.1.tmp && mv docs/kpod-rmi.1.tmp docs/kpod-rmi.1)\r\n" - ], - [ - 0.007878, - "(go-md2man -in docs/kpod-version.1.md -out docs/kpod-version.1.tmp && touch docs/kpod-version.1.tmp && mv docs/kpod-version.1.tmp docs/kpod-version.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-version.1.md -out docs/kpod-version.1.tmp && touch docs/kpod-version.1.tmp && mv docs/kpod-version.1.tmp docs/kpod-version.1)\r\n" - ], - [ - 0.006074, - "(go-md2man -in docs/kpod-tag.1.md -out docs/kpod-tag.1.tmp && touch docs/kpod-tag.1.tmp && mv docs/kpod-tag.1.tmp docs/kpod-tag.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-tag.1.md -out docs/kpod-tag.1.tmp && touch docs/kpod-tag.1.tmp && mv docs/kpod-tag.1.tmp docs/kpod-tag.1)\r\n" - ], - [ - 0.007358, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.021811, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m22s\u001b[39m\r\n" - ], - [ - 0.001102, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000118, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.5e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 6.9e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000142, - "\u001b[?1h\u001b=" - ], - [ - 2.9e-05, - "\u001b[?2004h" - ], - [ - 22.687022, - "s" - ], - [ - 0.176986, - "\bsu" - ], - [ - 0.231668, - "d" - ], - [ - 0.167315, - "o" - ], - [ - 0.224159, - " " 
- ], - [ - 0.184546, - "m" - ], - [ - 0.111947, - "a" - ], - [ - 0.127867, - "k" - ], - [ - 0.07167, - "e" - ], - [ - 0.095736, - " " - ], - [ - 0.072818, - "i" - ], - [ - 0.071884, - "n" - ], - [ - 0.07219, - "s" - ], - [ - 0.088277, - "t" - ], - [ - 0.071516, - "a" - ], - [ - 0.14414, - "l" - ], - [ - 0.119615, - "l" - ], - [ - 0.239306, - "\u001b[?1l\u001b>" - ], - [ - 7.7e-05, - "\u001b[?2004l\r\r\n" - ], - [ - 0.001719, - "\u001b]2;sudo make install\u0007\u001b]1;make\u0007" - ], - [ - 3.018747, - "install -D -m 755 crio /usr/local/bin/crio\r\n" - ], - [ - 0.042567, - "install -D -m 755 crioctl /usr/local/bin/crioctl\r\n" - ], - [ - 0.026457, - "install -D -m 755 kpod /usr/local/bin/kpod" - ], - [ - 6.3e-05, - "\r\n" - ], - [ - 0.040019, - "install -D -m 755 conmon/conmon /usr/local/libexec/crio/conmon\r\n" - ], - [ - 0.003162, - "install -D -m 755 pause/pause /usr/local/libexec/crio/pause\r\n" - ], - [ - 0.003708, - "install -d -m 755 /usr/local/share/man/man1\r\n" - ], - [ - 0.00089, - "install -d -m 755 /usr/local/share/man/man5\r\n" - ], - [ - 0.000625, - "install -d -m 755 /usr/local/share/man/man8\r\n" - ], - [ - 0.000846, - "install -m 644 docs/kpod-diff.1 docs/kpod-push.1 docs/kpod-cp.1 docs/kpod.1 docs/kpod-export.1 docs/kpod-load.1 docs/kpod-logs.1 docs/kpod-images.1 docs/kpod-umount.1 docs/kpod-save.1 docs/kpod-mount.1 docs/kpod-info.1 docs/kpod-inspect.1 docs/kpod-history.1 docs/kpod-pull.1 docs/kpod-rmi.1 docs/kpod-version.1 docs/kpod-tag.1 -t /usr/local/share/man/man1\r\n" - ], - [ - 0.009723, - "install -m 644 docs/crio.conf.5 -t /usr/local/share/man/man5\r\n" - ], - [ - 0.002898, - "install -m 644 docs/crio.8 -t /usr/local/share/man/man8\r\n" - ], - [ - 0.006399, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.021303, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001274, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.0001, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 7.9e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 7e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.4e-05, - "\u001b[?1h\u001b=" - ], - [ - 3.4e-05, - "\u001b[?2004h" - ], - [ - 255.497514, - "v" - ], - [ - 0.111133, - "\bvi" - ], - [ - 0.096261, - " " - ], - [ - 0.055637, - "c" - ], - [ - 0.128288, - "m" - ], - [ - 0.295825, - "\b \b" - ], - [ - 0.499642, - "\b \b" - ], - [ - 0.03181, - "\b" - ], - [ - 0.030112, - "\b\bv \b" - ], - [ - 0.029886, - "\b \b" - ], - [ - 2470.181351, - "g" - ], - [ - 0.134982, - "\bgi" - ], - [ - 0.116687, - "t" - ], - [ - 0.248596, - " " - ], - [ - 0.091959, - "c" - ], - [ - 0.221434, - "h" - ], - [ - 0.511729, - "\b \b" - ], - [ - 0.149467, - "\b \b" - ], - [ - 0.180553, - "r" - ], - [ - 0.09336, - "e" - ], - [ - 0.118113, - "s" - ], - [ - 0.166341, - "e" - ], - [ - 0.101897, - "t" - ], - [ - 0.135639, - " " - ], - [ - 0.101112, - "-" - ], - [ - 0.133755, - "-" - ], - [ - 0.204129, - "h" - ], - [ - 0.087003, - "a" - ], - [ - 0.070922, - "r" - ], - [ - 0.107809, - "d" - ], - [ - 0.081233, - " " - ], - [ - 0.210727, - "H" - ], - [ - 0.08464, - "E" - ], - [ - 0.340797, - "AD\u001b[1m \u001b[0m" - ], - [ - 0.359977, - "\b\u001b[0m \b" - ], - [ - 0.000209, - "\u001b[?1l\u001b>" - ], - [ - 3.7e-05, - "\u001b[?2004l\r\r\n" - ], - [ - 0.00869, - "\u001b]2;git reset 
--hard HEAD\u0007\u001b]1;git\u0007" - ], - [ - 0.033935, - "HEAD is now at 6ca462a3 Merge pull request #718 from 14rcole/kpod-logs\r\n" - ], - [ - 0.000491, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.039313, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.00621, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000838, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.401122, - "g" - ], - [ - 0.143266, - "\bgi" - ], - [ - 0.141142, - "t" - ], - [ - 0.13335, - " " - ], - [ - 0.080629, - "p" - ], - [ - 0.068567, - "u" - ], - [ - 0.139439, - "s" - ], - [ - 0.111629, - "h" - ], - [ - 0.103453, - " " - ], - [ - 0.140922, - "o" - ], - [ - 0.09323, - "r" - ], - [ - 0.126723, - "i" - ], - [ - 0.116153, - "g" - ], - [ - 0.103085, - "i" - ], - [ - 0.110423, - "n" - ], - [ - 0.047839, - " " - ], - [ - 0.211066, - "m" - ], - [ - 0.140391, - "a" - ], - [ - 0.493452, - "s" - ], - [ - 0.228966, - "ter\u001b[1m \u001b[0m" - ], - [ - 0.433861, - "\b\u001b[0m \b" - ], - [ - 0.000112, - "\u001b[?1l\u001b>" - ], - [ - 3.1e-05, - "\u001b[?2004l" - ], - [ - 0.001239, - "\r\r\n" - ], - [ - 0.004134, - "\u001b]2;git push origin master\u0007\u001b]1;git\u0007" - ], - [ - 0.74689, - "Total 0 (delta 0), reused 0 (delta 0)\r\n" - ], - [ - 1.494101, - "To github.com:14rcole/cri-o\r\n a69631c1..6ca462a3 master -> master\r\n" - ], - [ - 0.000159, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.046807, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001056, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000155, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 4e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 9.5e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.1e-05, - "\u001b[?1h\u001b=" - ], - [ - 4.1e-05, - "\u001b[?2004h" - ], - [ - 7.372135, - "g" - ], - [ - 0.126302, - "\bgi" - ], - [ - 0.101424, - "t" - ], - [ - 0.42971, - "n" - ], - [ - 1.654559, - "\b \b" - ], - [ - 0.292926, - " " - ], - [ - 0.227087, - "c" - ], - [ - 0.127935, - "h" - ], - [ - 0.100042, - "e" - ], - [ - 0.081115, - "c" - ], - [ - 0.195682, - "k" - ], - [ - 0.505623, - "o" - ], - [ - 0.138216, - "ut" - ], - [ - 0.45806, - " " - ], - [ - 0.100148, - "m" - ], - [ - 0.335248, - "\b \b" - ], - [ - 0.188284, - "k" - ], - [ - 0.140606, - "pod-" - ], - [ - 0.287649, - "t" - ], - [ - 0.053698, - "e" - ], - [ - 0.304664, - "st-refactor\u001b[1m \u001b[0m" - ], - [ - 0.651671, - "\b\u001b[0m \b" - ], - [ - 9.4e-05, - "\u001b[?1l\u001b>" - ], - [ - 6.3e-05, - "\u001b[?2004l\r\r\n" - ], - [ - 0.002007, - "\u001b]2;git checkout kpod-test-refactor\u0007\u001b]1;git\u0007" - ], - [ - 0.048356, - "Switched to branch 'kpod-test-refactor'\r\n" - ], - [ - 0.001669, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.055687, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-test-refactor \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ 
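Each frame in the raw recording is a two-element JSON array, [seconds-of-delay, "terminal output"], which matches the asciinema v1 "stdout" layout. For anyone who would rather replay than read this reduction, here is a minimal decoding sketch under that assumption; the sample frames below are invented:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // frame models one "[delay, output]" pair from the recording.
    type frame struct {
        Delay float64
        Data  string
    }

    // UnmarshalJSON decodes the two-element-array form of a frame.
    func (f *frame) UnmarshalJSON(b []byte) error {
        var raw [2]json.RawMessage
        if err := json.Unmarshal(b, &raw); err != nil {
            return err
        }
        if err := json.Unmarshal(raw[0], &f.Delay); err != nil {
            return err
        }
        return json.Unmarshal(raw[1], &f.Data)
    }

    func main() {
        // Two invented frames in the same shape as the recording's data.
        input := []byte(`[[0.5, "make kpod\r\n"], [6.73, "go build ...\r\n"]]`)

        var frames []frame
        if err := json.Unmarshal(input, &frames); err != nil {
            panic(err)
        }
        for _, fr := range frames {
            fmt.Printf("+%6.2fs %q\n", fr.Delay, fr.Data)
        }
    }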
$ vi test/kpod_push.bats
  # "test/kpod_push.bats" 87L, 2372C.  In the "kpod push to
  # containers/storage" test, a duplicated pair of lines,
  #     run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
  #     [ "$status" -eq 0 ]
  # is deleted with a visual-line selection, and the file is written:
  # "test/kpod_push.bats" 85L, 2298C written
$ git commit -a --amend
  # COMMIT_EDITMSG (24L, 716C):
  #     Refactor kpod tests
  #
  #     Move kpod tests from kpod.bats to kpod_[commandname].bats
  #
  #     Signed-off-by: Ryan Cole
[kpod-test-refactor 9f5954a6] Refactor kpod tests
 Date: Mon Aug 14 09:15:22 2017 -0400
 10 files changed, 333 insertions(+), 253 deletions(-)
 delete mode 100644 test/kpod.bats
 create mode 100644 test/kpod_history.bats
 create mode 100644 test/kpod_images.bats
 create mode 100644 test/kpod_inspect.bats
 create mode 100644 test/kpod_pull.bats
 create mode 100644 test/kpod_push.bats
 create mode 100644 test/kpod_version.bats
$ git push -f origin kpod-test-refactor
  # (object counting/compression/delta progress omitted)
Writing objects: 100% (12/12), 2.56 KiB | 2.56 MiB/s, done.
Total 12 (delta 9), reused 0 (delta 0)
remote: Resolving deltas: 100% (9/9), completed with 5 local objects.
To github.com:14rcole/cri-o
 + 72c6c49b...9f5954a6 kpod-test-refactor -> kpod-test-refactor (forced update)
$ git checkout master
Switched to branch 'master'
Your branch is up-to-date with 'origin/master'.
$ git pull upstream master
remote: Counting objects: 1, done.
remote: Total 1 (delta 0), reused 1 (delta 0), pack-reused 0
Unpacking objects: 100% (1/1), done.
From github.com:kubernetes-incubator/cri-o
 * branch              master     -> FETCH_HEAD
   6ca462a3..8c496a10  master     -> upstream/master
Updating 6ca462a3..8c496a10
Fast-forward
 cmd/kpod/common.go                                                  |   8 +
 cmd/kpod/formats/formats.go                                         |  23 +-
 cmd/kpod/formats/templates.go                                       |  78 +
 cmd/kpod/images.go                                                  | 126 +-
 vendor.conf                                                         |   1 +
 vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go            | 934 -
 vendor/github.com/containers/storage/pkg/archive/example_changes.go |  97 -
 vendor/github.com/fatih/camelcase/LICENSE.md                        |  20 +
 vendor/github.com/fatih/camelcase/README.md                         |  58 +
 vendor/github.com/fatih/camelcase/camelcase.go                      |  90 +
 10 files changed, 331 insertions(+), 1104 deletions(-)
 create mode 100644 cmd/kpod/formats/templates.go
 delete mode 100644 vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/archive/example_changes.go
 create mode 100644 vendor/github.com/fatih/camelcase/LICENSE.md
 create mode 100644 vendor/github.com/fatih/camelcase/README.md
 create mode 100644 vendor/github.com/fatih/camelcase/camelcase.go
$ vi cmd/kpod/images.go
  # "cmd/kpod/images.go" 203L, 4796C.  The recording ends browsing
  # genImagesFormat() and outputImages(), which assemble a text/template
  # format string (`{{ .ID | printf "%-20.12s" }} ...`) for the kpod
  # images table.
\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  61%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m123\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9  \u001b[26;13H\u001b[?12l\u001b[?25h" - ], - [ - 3.8e-05, - "\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 2.1862, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  60%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[25;13H\u001b[?12l\u001b[?25h" - ], - [ - 4.025378, - "\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 34.053856, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  61%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[26;13H\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;20H\u001b[1m\u001b[31m\u001b[106m{\u001b[27;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[27;9H\u001b[?12l\u001b[?25h\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;20H{\u001b[27;9H}\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  62%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[28;5H\u001b[?12l\u001b[?25h\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[29;13H\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  63%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[30;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.081812, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[14;73H\u001b[1m\u001b[31m\u001b[106m{\u001b[31;5H}\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[31;5H\u001b[?12l\u001b[?25h\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[14;73H{\u001b[31;5H}\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  64%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[32;5H\u001b[?12l\u001b[?25h\u001b[m\u001b[93m\u001b[107m\u001b[53;206H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m30\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[33;13H\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  65%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[34;13H\u001b[?12l\u001b[?25h\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[35;5H\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  
66%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[36;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.035045, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[37;13H\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  67%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[38;13H\u001b[?12l\u001b[?25h\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:12\u001b[39;16H\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[38;42H\u001b[1m\u001b[31m\u001b[106m{\u001b[40;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9 \u001b[40;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.014251, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[38;42H{\u001b[40;13H}\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  68%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[41;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.035294, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[42;5H" - ], - [ - 3.803564, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[41;13H" - ], - [ - 0.502126, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[38;42H\u001b[1m\u001b[31m\u001b[106m{\u001b[40;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  67%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[40;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.039219, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[38;42H{\u001b[40;13H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:12\u001b[39;16H\u001b[?12l\u001b[?25h" - ], - [ - 0.022715, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9 \u001b[38;13H" - ], - [ - 0.030883, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  66%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[37;13H" - ], - [ - 0.032199, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[36;13H" - ], - [ - 0.028523, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  65%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[35;5H" - ], - [ - 0.031179, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[34;13H" - ], - [ - 0.032779, - 
"\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  64%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[33;13H" - ], - [ - 0.028555, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;206H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m29\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[32;5H" - ], - [ - 0.048269, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[14;73H\u001b[1m\u001b[31m\u001b[106m{\u001b[31;5H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  63%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[31;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.019915, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[14;73H{\u001b[31;5H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[30;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.028641, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  62%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[29;13H" - ], - [ - 0.027012, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[28;5H" - ], - [ - 0.033581, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;20H\u001b[1m\u001b[31m\u001b[106m{\u001b[27;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  61%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[27;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.02925, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;20H{\u001b[27;9H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[26;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.030751, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  60%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[25;13H" - ], - [ - 0.031339, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[24;5H" - ], - [ - 0.041524, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  59%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[23;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.022427, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[20;16H\u001b[1m\u001b[31m\u001b[106m{\u001b[22;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;206H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m19\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[22;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.033264, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[20;16H{\u001b[22;9H}\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  58%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[21;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.023387, - 
"\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[20;13H" - ], - [ - 0.037125, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  57%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[19;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.026419, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[18;13H" - ], - [ - 0.040842, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[15;18H\u001b[1m\u001b[31m\u001b[106m{\u001b[17;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  56%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[17;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.023205, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[15;18H{\u001b[17;9H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[16;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.033995, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  55%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[15;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.023807, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[14;13H" - ], - [ - 0.037724, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  54%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[13;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.028157, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;206H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m09\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[12;5H" - ], - [ - 0.035934, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[11;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.027185, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[10;5H" - ], - [ - 0.036538, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[7;23H\u001b[1m\u001b[31m\u001b[106m{\u001b[9;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  52%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[9;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.029736, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[7;23H{\u001b[9;9H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[8;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.036535, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  
51%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[7;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.023374, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[6;13H" - ], - [ - 0.033091, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  50%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[5;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.025945, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[2;16H\u001b[1m\u001b[31m\u001b[106m{\u001b[4;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[4;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.041382, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 97 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"error parsing filter\"\u001b[m\u001b[93m\u001b[107m)\u001b[3;16H{\u001b[5;9H}\u001b[54;1H\u001b[K\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  49%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9\u001b[4;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.030519, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 96 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[53;205H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 99\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.048667, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 95 \u001b[m\u001b[93m\u001b[107m\u001b[8Cparams, err = libkpodimage.ParseFilter(store, c.String(\u001b[36m\"filter\"\u001b[m\u001b[93m\u001b[107m))\u001b[2;27H\u001b[1m\u001b[31m\u001b[106m{\u001b[4;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  48%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.028735, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 94 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m c.IsSet(\u001b[36m\"filter\"\u001b[m\u001b[93m\u001b[107m) {\u001b[3;27H{\u001b[5;13H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:12\u001b[4;16H\u001b[?12l\u001b[?25h" - ], - [ - 0.048253, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 93 \u001b[m\u001b[93m\u001b[107m \u001b[32mvar\u001b[m\u001b[93m\u001b[107m params *libkpodimage.FilterParams\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  47%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9 \u001b[4;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.011542, - 
"\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 92 \u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.035746, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 91 \u001b[m\u001b[93m\u001b[107m }\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  46%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.039133, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 90 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.New(\u001b[36m\"'kpod images' requires at most 1 argument\"\u001b[m\u001b[93m\u001b[107m)\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.033562, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 89 \u001b[m\u001b[93m\u001b[107m } \u001b[32melse\u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(c.Args()) > \u001b[36m1\u001b[m\u001b[93m\u001b[107m {\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  45%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[4;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.423507, - "\u001b[?25l\u001b[54;1H\u001b[m\u001b[93m\u001b[107m/" - ], - [ - 8.6e-05, - "\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.351895, - "h\u001b[?25l" - ], - [ - 0.0106, - "\u001b[15;53H\u001b[7m\u001b[91mh\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[53;2H\u001b[1m\u001b[38;5;22m\u001b[48;5;148mCOMMND \u001b[m\u001b[93m\u001b[107m\u001b[186C\u001b[38;5;107m\u001b[48;5;240m  51%\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m103\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:49\r\n\u001b[m\u001b[93m\u001b[107m/h" - ], - [ - 6e-05, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.063442, - "e" - ], - [ - 0.000104, - "\u001b[?25l" - ], - [ - 0.012164, - "\u001b[15;53Hh\u001b[20;89H\u001b[7m\u001b[91mhe\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:85\r\n\u001b[m\u001b[93m\u001b[107m/he" - ], - [ - 0.000532, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.072256, - "a" - ], - [ - 7.1e-05, - "\u001b[?25l" - ], - [ - 0.00992, - "\u001b[20;91H\u001b[7m\u001b[91ma\u001b[54;5H" - ], - [ - 0.001459, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.162629, - "\u001b[27m\u001b[m\u001b[93m\u001b[107md" - ], - [ - 8.2e-05, - "\u001b[?25l" - ], - [ - 0.009968, - "\u001b[20;92H\u001b[7m\u001b[91md\u001b[54;6H" - ], - [ - 0.00011, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.147673, - "\u001b[27m\u001b[m\u001b[93m\u001b[107me\u001b[?25l" - ], - [ - 0.013264, - "\u001b[1;52r\u001b[1;1H\u001b[29M\u001b[1;54r\u001b[24;1H\u001b[96m\u001b[47m141 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m 
\u001b[32mlen\u001b[m\u001b[93m\u001b[107m(img.Names) > \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m142 \u001b[m\u001b[93m\u001b[107m\u001b[12Cname = img.Names[\u001b[36m0\u001b[m\u001b[93m\u001b[107m]\r\n\u001b[96m\u001b[47m143 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m144 \r\n145 \u001b[m\u001b[93m\u001b[107m\u001b[8Cinfo, imageDigest, size, _ := libkpodimage.InfoAndDigestAndSize(store, img)\r\n\u001b[96m\u001b[47m146 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m info != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m147 \u001b[m\u001b[93m\u001b[107m\u001b[12CcreatedTime = info.Created\r\n\u001b[96m\u001b[47m148 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m149 \r\n150 \u001b[m\u001b[93m\u001b[107m\u001b[8Cparams := imageOutputParams{\r\n\u001b[96m\u001b[47m151 \u001b[m\u001b[93m\u001b[107m\u001b[12CID:\u001b[8Cimg.ID,\r\n\u001b[96m\u001b[47m152 \u001b[m\u001b[93m\u001b[107m\u001b[12CName: name,\r\n\u001b[96m\u001b[47m153 \u001b[m\u001b[93m\u001b[107m\u001b[12CDigest: imageDigest,\r\n\u001b[96m\u001b[47m154 \u001b[m\u001b[93m\u001b[107m\u001b[12CCreatedAt: createdTime.Format(\u001b[36m\"Jan 2, 2006 15:04\"\u001b[m\u001b[93m\u001b[107m),\r\n\u001b[96m\u001b[47m155 \u001b[m\u001b[93m\u001b[107m\u001b[12CSize: libkpodimage.FormattedSize(size),\r\n\u001b[96m\u001b[47m156 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m157 \u001b[m\u001b[93m\u001b[10" - ], - [ - 6.2e-05, - "7m\u001b[8CimageOutput = \u001b[32mappend\u001b[m\u001b[93m\u001b[107m(imageOutput, params)\r\n\u001b[96m\u001b[47m158 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m159 \r\n160 \u001b[m\u001b[93m\u001b[107m \u001b[32mvar\u001b[m\u001b[93m\u001b[107m out formats.Writer\r\n\u001b[96m\u001b[47m161 \r\n162 \u001b[m\u001b[93m\u001b[107m \u001b[32mswitch\u001b[m\u001b[93m\u001b[107m outputFormat {\r\n\u001b[96m\u001b[47m163 \u001b[m\u001b[93m\u001b[107m \u001b[32mcase\u001b[m\u001b[93m\u001b[107m \u001b[36m\"json\"\u001b[m\u001b[93m\u001b[107m:\r\n\u001b[96m\u001b[47m164 \u001b[m\u001b[93m\u001b[107m\u001b[8Cout = formats.JSONstruct{Output: toGeneric(imageOutput)}\r\n\u001b[96m\u001b[47m165 \u001b[m\u001b[93m\u001b[107m \u001b[32mdefault\u001b[m\u001b[93m\u001b[107m:\r\n\u001b[96m\u001b[47m166 \u001b[m\u001b[93m\u001b[107m\u001b[8Cout = formats.StdoutTemplate{Output: toGeneric(imageOutput), Template: outputFormat, Fields: imageOutput[\u001b[36m0\u001b[m\u001b[93m\u001b[107m].\u001b[7m\u001b[91mheade\u001b[27m\u001b[m\u001b[93m\u001b[107mrMap()}\r\n\u001b[96m\u001b[47m167 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m168 \r\n169 \u001b[m\u001b[93m\u001b[107m formats.Writer(out).Out()\u001b[53;175H\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;181H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;189H\u001b[38;5;247m\u001b[48;5;236m" - ], - [ - 0.000184, - " go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;194H\u001b[38;5;144m\u001b[48;5;240m  82%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[53;201H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;203H 
\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1668\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;252m:117\u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[K\u001b[54;1H/heade" - ], - [ - 6.2e-05, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.090014, - "r\u001b[?25l" - ], - [ - 0.010578, - "\u001b[49;126H\u001b[7m\u001b[91mr\u001b[54;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.242781, - "\u001b[27m\u001b[m\u001b[93m\u001b[107mM" - ], - [ - 6.5e-05, - "\u001b[?25l" - ], - [ - 0.018322, - "\u001b[49;127H\u001b[7m\u001b[91mM\u001b[54;9H" - ], - [ - 0.000103, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.216526, - "\u001b[27m\u001b[m\u001b[93m\u001b[107ma\u001b[?25l" - ], - [ - 0.008316, - "\u001b[49;128H\u001b[7m\u001b[91ma\u001b[54;10H" - ], - [ - 0.000108, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.164608, - "\u001b[27m\u001b[m\u001b[93m\u001b[107mp\u001b[?25l" - ], - [ - 0.010964, - "\u001b[49;129H\u001b[7m\u001b[91mp\u001b[54;11H" - ], - [ - 5.8e-05, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.303811, - "\r" - ], - [ - 5.4e-05, - "\u001b[?25l" - ], - [ - 0.01537, - "\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[49;121H\u001b[7m\u001b[33mheaderMap\u001b[m\u001b[93m\u001b[107m\u001b[53;2H\u001b[1m\u001b[38;5;22m\u001b[48;5;148mNORMAL \u001b[49;121H\u001b[?12l\u001b[?25h" - ], - [ - 0.613627, - "\u001b[?25l\u001b[54;1H" - ], - [ - 0.009249, - "\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[24M\u001b[1;54r\u001b[29;1H\u001b[96m\u001b[47m170 \r\n171 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m172 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m173 \r\n174 \u001b[m\u001b[93m\u001b[107m\u001b[32mtype\u001b[m\u001b[93m\u001b[107m imageOutputParams \u001b[32mstruct\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m175 \u001b[m\u001b[93m\u001b[107m ID\u001b[8C\u001b[33mstring\u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[36m`json:\"id\"`\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m176 \u001b[m\u001b[93m\u001b[107m Name \u001b[33mstring\u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[36m`json:\"names\"`\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m177 \u001b[m\u001b[93m\u001b[107m Digest digest.Digest \u001b[36m`json:\"digest\"`\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m178 \u001b[m\u001b[93m\u001b[107m CreatedAt \u001b[33mstring\u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[36m`json:\"created\"`\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m179 \u001b[m\u001b[93m\u001b[107m Size \u001b[33mstring\u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[36m`json:\"size\"`\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m180 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m181 \r\n182 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m toGeneric(params []imageOutputParams) []\u001b[32minterface\u001b[m\u001b[93m\u001b[107m{} {\r\n\u001b[96m\u001b[47m183 \u001b[m\u001b[93m\u001b[107m genericParams := \u001b[32mmake\u001b[m\u001b[93m\u001b[107m([]\u001b[32mi" - ], - [ - 4.4e-05, - "nterface\u001b[m\u001b[93m\u001b[107m{}, \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(params))\r\n\u001b[96m\u001b[47m184 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m i, v := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m params {\r\n\u001b[96m\u001b[47m185 \u001b[m\u001b[93m\u001b[107m\u001b[8CgenericParams[i] = \u001b[32minterface\u001b[m\u001b[93m\u001b[107m{}(v)\r\n\u001b[96m\u001b[47m186 \u001b[m\u001b[93m\u001b[107m 
}\r\n\u001b[96m\u001b[47m187 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m genericParams\r\n\u001b[96m\u001b[47m188 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m189 \r\n190 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (i *imageOutputParams) \u001b[7m\u001b[33mheaderMap\u001b[m\u001b[93m\u001b[107m() \u001b[33mmap\u001b[m\u001b[93m\u001b[107m[\u001b[33mstring\u001b[m\u001b[93m\u001b[107m]\u001b[33mstring\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m191 \u001b[m\u001b[93m\u001b[107m v := reflect.Indirect(reflect.ValueOf(i))\r\n\u001b[96m\u001b[47m192 \u001b[m\u001b[93m\u001b[107m values := \u001b[32mmake\u001b[m\u001b[93m\u001b[107m(\u001b[33mmap\u001b[m\u001b[93m\u001b[107m[\u001b[33mstring\u001b[m\u001b[93m\u001b[107m]\u001b[33mstring\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m193 \u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[K\u001b[53;175H\u001b[38;5;231m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b" - ], - [ - 2.6e-05, - "[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  94%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\b190\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 2.596474, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m194 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m i := \u001b[36m0\u001b[m\u001b[93m\u001b[107m; i < v.NumField(); i++ {\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.204651, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m195 \u001b[m\u001b[93m\u001b[107m\u001b[8Ckey := v.Type().Field(i).Name\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  95%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.176184, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m196 \u001b[m\u001b[93m\u001b[107m\u001b[8Cvalue := key\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1 \u001b[49;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.187364, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m197 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m value == \u001b[36m\"ID\"\u001b[m\u001b[93m\u001b[107m || value == \u001b[36m\"Name\"\u001b[m\u001b[93m\u001b[107m {\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  
96%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.159818, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m198 \u001b[m\u001b[93m\u001b[107m\u001b[12Cvalue = \u001b[36m\"Image\"\u001b[m\u001b[93m\u001b[107m + value\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.510264, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m199 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  97%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:20\u001b[49;24H\u001b[?12l\u001b[?25h" - ], - [ - 0.025799, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m200 \u001b[m\u001b[93m\u001b[107m\u001b[8Cvalues[key] = fmt.Sprintf(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[31m%s\u001b[m\u001b[93m\u001b[107m\u001b[36m \"\u001b[m\u001b[93m\u001b[107m, strings.ToUpper(splitCamelCase(value)))\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.03976, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m201 \u001b[m\u001b[93m\u001b[107m }\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  98%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.024833, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[47;49H\u001b[1m\u001b[31m\u001b[106m{\u001b[49;13H}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m202 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m values\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9 \u001b[49;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.052706, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[46;49H{\u001b[48;13H}\u001b[52;1H\u001b[96m\u001b[47m203 \u001b[m\u001b[93m\u001b[107m}\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  99%\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m200\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.031789, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;43H\u001b[1m\u001b[31m\u001b[106m{\u001b[50;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5 \u001b[50;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.028149, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;43H{\u001b[50;9H}\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m 100%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[51;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.030014, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[39;63H\u001b[1m\u001b[31m\u001b[106m{\u001b[52;5H}\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1 \u001b[52;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.018881, - "\u001b[?5h" - ], - [ - 0.000174, - "\u001b[?2004l" - ], - [ - 0.100098, - "\u001b[?2004h" - ], - [ - 0.031161, - "\u001b[?5l" - ], - [ - 0.340032, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[39;63H{\u001b[52;5H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[51;21H\u001b[?12l\u001b[?25h" - ], - [ - 47.126026, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;43H\u001b[1m\u001b[31m\u001b[106m{\u001b[50;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  99%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5 \u001b[50;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.49885, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;43H{\u001b[50;9H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.03165, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[46;49H\u001b[1m\u001b[31m\u001b[106m{\u001b[48;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  98%\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m199\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9 \u001b[48;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.030139, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[46;49H{\u001b[48;13H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[47;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.029042, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  97%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[46;33H" - ], - [ - 0.029773, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:20\u001b[45;24H" - ], - [ - 0.031993, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  96%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[44;33H" - ], - [ - 0.03237, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[43;33H" - ], - [ - 0.195643, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  95%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1 \u001b[42;5H" - ], - [ - 0.210797, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[41;33H" - ], - [ - 0.517507, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  94%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[40;33H" - ], - [ - 
0.244061, - "\u001b[53;210H8\u001b[40;32H" - ], - [ - 0.216928, - "\u001b[53;210H7\u001b[40;31H" - ], - [ - 0.181163, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[31m\u001b[106m(\u001b[18C)\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m6\u001b[40;30H\u001b[?12l\u001b[?25h" - ], - [ - 315.572339, - "\u001b[?25l\u001b[54;1H\u001b[m\u001b[93m\u001b[107m:" - ], - [ - 4.9e-05, - "\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.220904, - "\b" - ], - [ - 0.002635, - "\u001b[?25l\u001b[40;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.335899, - "\u001b[?25l" - ], - [ - 0.0299, - "\u001b[54;1H\u001b[34m-- VISUAL --\u001b[m\u001b[93m\u001b[107m\u001b[53;1H\u001b[1m\u001b[38;5;94m\u001b[48;5;214m VISUAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[m\u001b[93m\u001b[107m\u001b[53;9H\u001b[38;5;214m\u001b[48;5;94m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;214m\u001b[48;5;94m↔\u001b[53;12H1 \u001b[m\u001b[93m\u001b[107m\u001b[38;5;94m\u001b[48;5;240m\u001b[53;15H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[53;17H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[53;26H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240ms\bimages.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[53;47H \u001b[40;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.729109, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[K\u001b[54;1H:\u001b[?2004h'<,'>\u001b[?12l\u001b[?25h" - ], - [ - 0.225295, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.582167, - "\u001b[?25l\u001b[54;1H\u001b[K" - ], - [ - 0.007677, - "\u001b[53;1H\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;214m\u001b[48;5;94m\u001b[m\u001b[93m\u001b[107m\u001b[53;9H\u001b[38;5;148m\u001b[48;5;240m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[53;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[53;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpo\u001b[m\u001b[93m\u001b[107m\u001b[2C\u001b[1m\u001b[38;5;231m\u001b[48;5;240mimages.go s\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;240m\u001b[48;5;236m\u001b[53;42H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mg\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;231m\u001b[48;5;236m     \u001b[40;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.719145, - "\u001b[?25l\u001b[54;1H\u001b[m\u001b[93m\u001b[107m:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.136951, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.082665, - "\r" - ], - [ - 0.02936, - "\u001b[?25l" - ], - [ - 0.000147, - "\u001b[?2004l" - ], - [ - 9.5e-05, - "\u001b[54;1H\u001b[K\u001b[54;1H" - ], - [ - 5.5e-05, - "\u001b[?2004l\u001b[?1l\u001b>" - ], - [ - 6.2e-05, - "\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.003739, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.062587, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master \u001b[39m \u001b[33m422s\u001b[39m\r\n" - ], - [ - 0.003799, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m 
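For context, the method the search lands on is kpod's reflection-based
header derivation: table column headers are computed from the row
struct's field names rather than hand-written. The following is a
minimal self-contained sketch of the pattern visible on screen,
simplified and abridged; splitCamelCase here is a hand-rolled stand-in
for the github.com/fatih/camelcase package vendored by the pull above.

```go
// Sketch of headerMap: derive table column headers from the
// row struct's field names via reflection.
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// imageOutputParams is one row of `kpod images` output (abridged).
type imageOutputParams struct {
	ID        string
	Name      string
	CreatedAt string
	Size      string
}

// headerMap maps each field name to its display header,
// e.g. "CreatedAt" -> "CREATED AT" and "ID" -> "IMAGE ID".
func (i *imageOutputParams) headerMap() map[string]string {
	v := reflect.Indirect(reflect.ValueOf(i))
	values := make(map[string]string)
	for f := 0; f < v.NumField(); f++ {
		key := v.Type().Field(f).Name
		value := key
		if value == "ID" || value == "Name" {
			value = "Image" + value // disambiguate generic names
		}
		values[key] = strings.ToUpper(splitCamelCase(value))
	}
	return values
}

// splitCamelCase inserts spaces at case boundaries ("ImageID" ->
// "Image ID"); a stand-in for camelcase.Split.
func splitCamelCase(s string) string {
	runes := []rune(s)
	var b strings.Builder
	for i, r := range runes {
		if i > 0 && r >= 'A' && r <= 'Z' {
			prevLower := runes[i-1] >= 'a' && runes[i-1] <= 'z'
			nextLower := i+1 < len(runes) && runes[i+1] >= 'a' && runes[i+1] <= 'z'
			if prevLower || nextLower {
				b.WriteByte(' ')
			}
		}
		b.WriteRune(r)
	}
	return b.String()
}

func main() {
	fmt.Println((&imageOutputParams{}).headerMap())
	// map[CreatedAt:CREATED AT ID:IMAGE ID Name:IMAGE NAME Size:SIZE]
}
```

The payoff of this design is that adding a field to imageOutputParams
automatically yields a matching column header.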
❯ vi cmd/kpod/formats/formats.go
"cmd/kpod/formats/formats.go" 69L, 1356C
(the session browses the Writer interface, a single Out() error method,
and its two implementations, JSONstruct and StdoutTemplate, whose Out
methods emit indented JSON and header-plus-row Go-template output
respectively)
84%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[49;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.501192, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m 62 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[54;1H\u001b[K\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  86%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[49;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.045304, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m 63 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  87%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m60\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:8\u001b[49;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.024223, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  88%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.040877, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m\u001b[8Cfmt.Println()\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  90%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.038981, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m }\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  91%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.022814, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  93%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.03284, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  94%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.045318, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[42;38H\u001b[1m\u001b[31m\u001b[106m{\u001b[49;9H}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 69 
\u001b[m\u001b[93m\u001b[107m}\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  96%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[49;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.028648, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[42;38H{\u001b[49;9H}\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  97%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[50;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.022824, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  99%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[51;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.137956, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  97%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[50;9H" - ], - [ - 0.507979, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[42;38H\u001b[1m\u001b[31m\u001b[106m{\u001b[49;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  96%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.024442, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[42;38H{\u001b[49;9H}\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  94%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:8\u001b[48;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.037304, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  93%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[47;12H" - ], - [ - 0.030234, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  91%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[46;12H" - ], - [ - 0.035803, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  90%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[45;12H" - ], - [ - 0.027875, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  88%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[44;12H" - ], - [ - 0.031294, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  87%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[43;12H" - ], - [ - 0.028909, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  86%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m59\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[42;9H" - ], - [ - 0.028601, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  
84%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[41;5H" - ], - [ - 0.0318, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[38;23H\u001b[1m\u001b[31m\u001b[106m{\u001b[40;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  83%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[40;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.04273, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[38;23H{\u001b[40;9H}\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  81%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:8\u001b[39;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.028264, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  80%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[38;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.031967, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  78%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[37;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.037706, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;51H\u001b[1m\u001b[31m\u001b[106m{\u001b[36;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  77%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[36;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.030776, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;51H{\u001b[36;9H}\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  75%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:8\u001b[35;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.028632, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  74%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[34;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.032475, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  72%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[33;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.026166, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  71%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m49\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[32;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.168545, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  70%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[31;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.49982, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  
68%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[30;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.030812, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  67%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[29;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.030802, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  65%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[28;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.032574, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  64%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[27;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.029137, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  62%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[26;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.037678, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  61%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[25;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.28215, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  62%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:8\u001b[26;12H" - ], - [ - 0.499953, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  64%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[27;12H" - ], - [ - 0.029953, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  65%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[28;12H" - ], - [ - 0.033793, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  67%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[29;12H" - ], - [ - 0.032469, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  68%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[30;12H" - ], - [ - 0.030452, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  70%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[31;12H" - ], - [ - 0.03474, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  71%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[32;12H" - ], - [ - 0.031553, - 
"\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  72%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m50\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[33;12H" - ], - [ - 0.027886, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  74%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[34;12H" - ], - [ - 0.032183, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  75%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[35;12H" - ], - [ - 0.361308, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  74%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[34;12H" - ], - [ - 0.180436, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  72%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[33;12H" - ], - [ - 0.163593, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  71%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m49\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[32;12H" - ], - [ - 0.167433, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  70%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[31;12H" - ], - [ - 0.499694, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  68%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[30;12H" - ], - [ - 0.035176, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  67%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[29;12H" - ], - [ - 0.176855, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  65%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[28;12H" - ], - [ - 0.167552, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  64%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[27;12H" - ], - [ - 0.162525, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  62%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[26;12H" - ], - [ - 0.344534, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  61%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5\u001b[25;9H" - ], - [ - 0.475291, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[42C\u001b[1m\u001b[31m\u001b[106m{\u001b[36;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;209H\u001b[38;5;22m\u001b[48;5;252m47\u001b[25;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.862743, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[36;9H}\u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m6\u001b[25;50H\u001b[?12l\u001b[?25h" - ], - [ - 0.211622, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;29H\u001b[1m\u001b[31m\u001b[106m(\u001b[19C)\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m5\u001b[25;49H\u001b[?12l\u001b[?25h" - ], - [ - 0.334139, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;29H(t\u001b[18C) \u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m6\u001b[25;50H\u001b[?12l\u001b[?25h" - ], - [ - 0.690098, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m \u001b[1m\u001b[31m\u001b[106m{\u001b[36;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m7\u001b[25;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.829404, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[36;9H}\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  62%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[26;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.187135, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  64%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[27;51H" - ], - [ - 0.223449, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[28;27H\u001b[1m\u001b[31m\u001b[106m{\u001b[30;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  65%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:23\u001b[28;27H\u001b[?12l\u001b[?25h" - ], - [ - 0.381099, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[30;13H}\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  67%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:47\u001b[29;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.445137, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[28;27H\u001b[1m\u001b[31m\u001b[106m{\u001b[30;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  68%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9 \u001b[30;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.207385, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[28;27H{\u001b[30;13H}\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  70%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:47\u001b[31;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.222319, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[32;27H\u001b[1m\u001b[31m\u001b[106m{\u001b[34;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  71%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:23\u001b[32;27H\u001b[?12l\u001b[?25h" - ], - [ - 0.499981, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[34;13H}\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  
72%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m50\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:22\u001b[33;26H\u001b[?12l\u001b[?25h" - ], - [ - 0.029604, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[32;27H\u001b[1m\u001b[31m\u001b[106m{\u001b[34;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  74%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9 \u001b[34;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.162284, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[32;27H{\u001b[34;13H}\u001b[35;24H\u001b[1m\u001b[31m\u001b[106m()\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  75%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:21\u001b[35;25H\u001b[?12l\u001b[?25h" - ], - [ - 0.200133, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;51H\u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\u001b[35;24H()\u001b[36;9H\u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  77%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5 \u001b[36;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.196823, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;51H{\u001b[36;9H}\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  78%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:47\u001b[37;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.356692, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;51H\u001b[1m\u001b[31m\u001b[106m{\u001b[36;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  77%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5 \u001b[36;9H\u001b[?12l\u001b[?25h" - ], - [ - 837.577075, - "\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[1;1H\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m\u001b[32mtype\u001b[m\u001b[93m\u001b[107m JSONstruct \u001b[32mstruct\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m Output []\u001b[32minterface\u001b[m\u001b[93m\u001b[107m{}\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 22 \r\n 23 \u001b[m\u001b[93m\u001b[107m\u001b[96m// StdoutTemplate for Go template output\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[32mtype\u001b[m\u001b[93m\u001b[107m StdoutTemplate \u001b[32mstruct\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m Output []\u001b[32minterface\u001b[m\u001b[93m\u001b[107m{}\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m Template \u001b[33mstring\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m Fields \u001b[33mmap\u001b[m\u001b[93m\u001b[107m[\u001b[33mstring\u001b[m\u001b[93m\u001b[107m]\u001b[33mstring\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 28 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 29 \r\n 30 \u001b[m\u001b[93m\u001b[107m\u001b[96m// Out method for JSON\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 31 
\u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (j JSONstruct) Out() \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 32 \u001b[m\u001b[93m\u001b[107m data, err := json.MarshalIndent(j.Output, \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m, \u001b[36m\" \"\u001b[m\u001b[93m\u001b[107m)\r\n" - ], - [ - 7.7e-05, - "\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 35 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 36 \u001b[m\u001b[93m\u001b[107m fmt.Printf(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[31m%s\\n\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, data)\r\n\u001b[96m\u001b[47m 37 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 39 \r\n 40 \u001b[m\u001b[93m\u001b[107m\u001b[96m// Out method for Go templates\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 41 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (t StdoutTemplate) Out() \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 42 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m strings.HasPrefix(t.Template, \u001b[36m\"table\"\u001b[m\u001b[93m\u001b[107m) \u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m\u001b[8Ct.Template = strings.TrimSpace(t.Template[\u001b[36m5\u001b[m\u001b[93m\u001b[107m:])\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[8CheaderTmpl, err := template.New(\u001b[36m\"header\"\u001b[m\u001b[93m\u001b[107m).Funcs(headerFunctions).Parse(t.Template)\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m" - ], - [ - 0.006486, - "\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Template parsing error\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[8Cerr = headerTmpl.Execute(os.Stdout, t.Fields)\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 50 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 51 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 52 \u001b[m\u001b[93m\u001b[107m\u001b[8Cfmt.Println()\r\n\u001b[96m\u001b[47m 53 \u001b[m\u001b[93m\u001b[107m \u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 54 \u001b[m\u001b[93m\u001b[107m tmpl, err := template.New(\u001b[36m\"image\"\u001b[m\u001b[93m\u001b[107m).Funcs(basicFunctions).Parse(t.Template)\r\n\u001b[96m\u001b[47m 55 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 56 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Template parsing 
error\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 57 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 58 \r\n 59 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m" - ], - [ - 6.6e-05, - " _, img := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m t.Output {\r\n\u001b[96m\u001b[47m 60 \u001b[m\u001b[93m\u001b[107m\u001b[8CbasicTmpl := tmpl.Funcs(basicFunctions)\r\n\u001b[96m\u001b[47m 61 \u001b[m\u001b[93m\u001b[107m\u001b[8Cerr = basicTmpl.Execute(os.Stdout, img)\r\n\u001b[96m\u001b[47m 62 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 63 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m\u001b[8Cfmt.Println()\r\n\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/formats/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mformats.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;51H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                   " - ], - [ - 2.6e-05, - "                                                                                         \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  77%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 53\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5  \u001b[35;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.84171, - "\u001b[?25l\u001b[52;1H\u001b[m\u001b[93m\u001b[107m:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.167691, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.064539, - "\r" - ], - [ - 0.016979, - "\u001b[?25l\u001b[?2004l\u001b[52;1H\u001b[K\u001b[52;1H\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.002329, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.02599, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master \u001b[39m \u001b[33m853s\u001b[39m\r\n" - ], - [ - 0.001018, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000113, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 
2.3e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 8.7e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.7e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.7e-05, - "\u001b[?2004h" - ], - [ - 0.177155, - "e" - ], - [ - 0.184267, - "\bex" - ], - [ - 0.095845, - "i" - ], - [ - 0.144511, - "t" - ], - [ - 0.087093, - "\u001b[?1l\u001b>" - ], - [ - 0.000252, - "\u001b[?2004l\r\r\n" - ], - [ - 0.009572, - "\u001b]2;exit\u0007\u001b]1;exit\u0007" - ] - ] -} \ No newline at end of file diff --git a/kubernetes.md b/kubernetes.md index 5585c494..e95694ac 100644 --- a/kubernetes.md +++ b/kubernetes.md @@ -1,6 +1,6 @@ -# Running cri-o on kubernetes cluster +# Running CRI-O on kubernetes cluster -## Switching runtime from docker to cri-o +## Switching runtime from docker to CRI-O In standard docker kubernetes cluster, kubelet is running on each node as systemd service and is taking care of communication between runtime and api service. It is reponsible for starting microservices pods (such as `kube-proxy`, `kubedns`, etc. - they can be different for various ways of deploying k8s) and user pods. @@ -9,21 +9,19 @@ Configuration of kubelet determines which runtime is used and in what way. Kubelet itself is executed in docker container (as we can see in `kubelet.service`), but, what is important, **it's not** a kubernetes pod (at least for now), so we can keep kubelet running inside container (as well as directly on the host), and regardless of this, run pods in chosen runtime. -Below, you can find an instruction how to switch one or more nodes on running kubernetes cluster from docker to cri-o. +Below, you can find an instruction how to switch one or more nodes on running kubernetes cluster from docker to CRI-O. ### Preparing crio -You must prepare and install `crio` on each node you would like to switch. Here's the list of files that must be provided: +You must prepare and install `crio` on each node you would like to switch. 
+Besides the files installed by `make install install.config`, here's the list of files that must be provided:
 
-| File path                            | Description                | Location                                            |
-|--------------------------------------|----------------------------|-----------------------------------------------------|
-| `/etc/crio/crio.conf`                | crio configuration         | Generated on cri-o `make install`                   |
-| `/etc/crio/seccomp.conf`             | seccomp config             | Example stored in cri-o repository                  |
-| `/etc/containers/policy.json`        | containers policy          | Example stored in cri-o repository                  |
-| `/bin/{crio, runc}`                  | `crio` and `runc` binaries | Built from cri-o repository                         |
-| `/usr/local/libexec/crio/conmon`     | `conmon` binary            | Built from cri-o repository                         |
-| `/opt/cni/bin/{flannel, bridge,...}` | CNI plugins binaries       | Can be built from sources `containernetworking/cni` |
-| `/etc/cni/net.d/10-mynet.conf`       | Network config             | Example stored in [README file](README.md)          |
+| File path                            | Description                 | Location                                                 |
+|--------------------------------------|-----------------------------|----------------------------------------------------------|
+| `/etc/containers/policy.json`        | containers policy           | [Example](test/policy.json) stored in cri-o repository   |
+| `/bin/runc`                          | `runc` or other OCI runtime | Can be built from sources `opencontainers/runc`          |
+| `/opt/cni/bin/{flannel, bridge,...}` | CNI plugins binaries        | Can be built from sources `containernetworking/plugins`  |
+| `/etc/cni/net.d/...`                 | CNI network config          | Example [here](contrib/cni)                              |
 
 `crio` binary can be executed directly on host, inside the container or in any way. However, recommended way is to set it as a systemd service.
@@ -36,7 +34,7 @@
 Description=CRI-O daemon
 Documentation=https://github.com/kubernetes-incubator/cri-o
 
 [Service]
-ExecStart=/bin/crio --runtime /bin/runc --log /root/crio.log --debug
+ExecStart=/bin/crio --runtime /bin/runc --log /root/crio.log --log-level debug
 Restart=always
 RestartSec=10s
 
@@ -79,7 +77,7 @@ KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests
 You need to add following parameters to `KUBELET_ARGS`:
 * `--experimental-cri=true` - Use Container Runtime Interface. Will be true by default from kubernetes 1.6 release.
 * `--container-runtime=remote` - Use remote runtime with provided socket.
-* `--container-runtime-endpoint=/var/run/crio.sock` - Socket for remote runtime (default `crio` socket location).
+* `--container-runtime-endpoint=/var/run/crio/crio.sock` - Socket for remote runtime (default `crio` socket location).
 * `--runtime-request-timeout=10m` - Optional but useful. Some requests, especially pulling huge images, may take longer than default (2 minutes) and will cause an error.
 
 Kubelet is prepared now.
@@ -95,7 +93,7 @@ If your cluster is using flannel network, your network configuration should be l
 ```
 Then, kubelet will take parameters from `/run/flannel/subnet.env` - file generated by flannel kubelet microservice.
 
-## Starting kubelet with cri-o
+## Starting kubelet with CRI-O
 
 Start crio first, then kubelet. If you created `crio` service:
 ```
 # systemctl start crio
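The endpoint change above is easy to get wrong on an already-running node, so it can be worth probing the socket before repointing the kubelet at it. A minimal sketch, assuming only the new default socket path from this patch (the program itself is illustrative, not part of the PR):

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Dial the unix socket the kubelet will be configured to use via
	// --container-runtime-endpoint=/var/run/crio/crio.sock.
	conn, err := net.DialTimeout("unix", "/var/run/crio/crio.sock", 2*time.Second)
	if err != nil {
		fmt.Println("crio is not reachable:", err)
		return
	}
	defer conn.Close()
	fmt.Println("crio socket is up")
}
```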
diff --git a/libkpod/config.go b/lib/config.go
similarity index 90%
rename from libkpod/config.go
rename to lib/config.go
index 4a3ade4f..6a63b2b0 100644
--- a/libkpod/config.go
+++ b/lib/config.go
@@ -1,4 +1,4 @@
-package libkpod
+package lib
 
 import (
 	"bytes"
@@ -23,7 +23,7 @@ const (
 	cniBinDir         = "/opt/cni/bin/"
 	cgroupManager     = oci.CgroupfsCgroupsManager
 	lockPath          = "/run/crio.lock"
-	containerExitsDir = "/var/run/kpod/exits"
+	containerExitsDir = oci.ContainerExitsDir
 )
 
 // Config represents the entire set of configuration values that can be set for
@@ -51,6 +51,10 @@
 	// DefaultPidsLimit is the default value for maximum number of processes
 	// allowed inside a container
 	DefaultPidsLimit = 1024
+
+	// DefaultLogSizeMax is the default value for the maximum log size
+	// allowed for a container. Negative values mean that no limit is imposed.
+	DefaultLogSizeMax = -1
 )
 
 // This structure is necessary to fake the TOML tables when parsing,
@@ -79,7 +83,7 @@ type RootConfig struct {
 	LogDir string `toml:"log_dir"`
 
 	// FileLocking specifies whether to use file-based or in-memory locking
-	// File-based locking is required when multiple users of libkpod are
+	// File-based locking is required when multiple users of lib are
 	// present on the same system
 	FileLocking bool `toml:"file_locking"`
 }
@@ -114,6 +118,12 @@
 	// container runtime for all containers.
 	DefaultWorkloadTrust string `toml:"default_workload_trust"`
 
+	// NoPivot instructs the runtime to not use `pivot_root`, but instead use `MS_MOVE`
+	NoPivot bool `toml:"no_pivot"`
+
+	// EnableSharedPIDNamespace instructs the runtime to enable a shared PID namespace
+	EnableSharedPIDNamespace bool `toml:"enable_shared_pid_namespace"`
+
 	// Conmon is the path to conmon binary, used for managing the runtime.
 	Conmon string `toml:"conmon"`
@@ -138,6 +148,10 @@
 	// HooksDirPath location of oci hooks config files
 	HooksDirPath string `toml:"hooks_dir_path"`
 
+	// DefaultMounts is the list of mounts to be mounted for each container
+	// The format of each mount is "host-path:container-path"
+	DefaultMounts []string `toml:"default_mounts"`
+
 	// Hooks List of hooks to run with container
 	Hooks map[string]HookParams
 
@@ -145,6 +159,12 @@
 	// by the cgroup process number controller.
 	PidsLimit int64 `toml:"pids_limit"`
 
+	// LogSizeMax is the maximum number of bytes after which the log file
+	// will be truncated. It can be expressed as a human-friendly string
+	// that is parsed to bytes.
+	// Negative values indicate that the log file won't be truncated.
+	LogSizeMax int64 `toml:"log_size_max"`
+
 	// ContainerExitsDir is the directory in which container exit files are
 	// written to by conmon.
 	ContainerExitsDir string `toml:"container_exits_dir"`
@@ -274,6 +294,7 @@ func DefaultConfig() *Config {
 			PidsLimit:         DefaultPidsLimit,
 			ContainerExitsDir: containerExitsDir,
 			HooksDirPath:      DefaultHooksDirPath,
+			LogSizeMax:        DefaultLogSizeMax,
 		},
 		ImageConfig: ImageConfig{
 			DefaultTransport: defaultTransport,
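Together, the hunks above give `RuntimeConfig` a TOML-backed `log_size_max` knob and seed it from `DefaultLogSizeMax`. The new test file below exercises the serialize/deserialize cycle; as a standalone illustration, a round trip through `ToFile` and `UpdateFromFile` looks roughly like this (a sketch assuming the renamed `lib` import path; the printed values are the documented defaults):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/kubernetes-incubator/cri-o/lib"
)

func main() {
	// Start from the compiled-in defaults (pids_limit=1024, log_size_max=-1).
	c := lib.DefaultConfig()

	tmp, err := ioutil.TempFile("", "crio-config")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(tmp.Name())

	// Write the defaults out as TOML, then load them back in.
	if err := c.ToFile(tmp.Name()); err != nil {
		log.Fatal(err)
	}
	if err := c.UpdateFromFile(tmp.Name()); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.RuntimeConfig.PidsLimit, c.RuntimeConfig.LogSizeMax) // 1024 -1
}
```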
diff --git a/lib/config_test.go b/lib/config_test.go
new file mode 100644
index 00000000..59998382
--- /dev/null
+++ b/lib/config_test.go
@@ -0,0 +1,54 @@
+package lib
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+// TestConfigToFile ensures Config.ToFile(..) encodes and writes out
+// a Config instance to a file on disk.
+func TestConfigToFile(t *testing.T) {
+	// Test with a default configuration
+	c := DefaultConfig()
+	tmpfile, err := ioutil.TempFile("", "config")
+	if err != nil {
+		t.Fatalf("Unable to create temporary file: %+v", err)
+	}
+	// Clean up temporary file
+	defer os.Remove(tmpfile.Name())
+
+	// Make the ToFile call
+	err = c.ToFile(tmpfile.Name())
+	// Make sure no errors occurred while populating the file
+	if err != nil {
+		t.Fatalf("Unable to write to temporary file: %+v", err)
+	}
+
+	// Make sure the file is on disk
+	if _, err := os.Stat(tmpfile.Name()); os.IsNotExist(err) {
+		t.Fatalf("The config file was not written to disk: %+v", err)
+	}
+}
+
+// TestConfigUpdateFromFile ensures Config.UpdateFromFile(..) properly
+// updates an already created Config instance with new data.
+func TestConfigUpdateFromFile(t *testing.T) {
+	// Test with a default configuration
+	c := DefaultConfig()
+	// Make the UpdateFromFile call
+	err := c.UpdateFromFile("testdata/config.toml")
+	// Make sure no errors occurred while populating from the file
+	if err != nil {
+		t.Fatalf("Unable to update config from file: %+v", err)
+	}
+
+	// Check fields that should have changed after UpdateFromFile
+	if c.Storage != "overlay2" {
+		t.Fatalf("Update failed. Storage did not change to overlay2")
+	}
+
+	if c.RuntimeConfig.PidsLimit != 2048 {
+		t.Fatalf("Update failed. RuntimeConfig.PidsLimit did not change to 2048")
+	}
+}
diff --git a/libkpod/container.go b/lib/container.go
similarity index 98%
rename from libkpod/container.go
rename to lib/container.go
index 7835952d..8264ab6a 100644
--- a/libkpod/container.go
+++ b/lib/container.go
@@ -1,10 +1,10 @@
-package libkpod
+package lib
 
 import (
 	"fmt"
 
 	cstorage "github.com/containers/storage"
-	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
+	"github.com/kubernetes-incubator/cri-o/lib/sandbox"
 	"github.com/kubernetes-incubator/cri-o/oci"
 	"github.com/kubernetes-incubator/cri-o/pkg/registrar"
 	"github.com/pkg/errors"
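The `container_server.go` changes below replace the `stateLock`-guarded `map[string]*sandbox.Sandbox` with `sandbox.NewMemoryStore()`, which is why most of the accessors can drop their explicit locking. A sketch of the store surface exactly as this patch uses it (`Add`/`Get`/`Delete`/`List`); the package name and helper function are hypothetical:

```go
package storesketch

import "github.com/kubernetes-incubator/cri-o/lib/sandbox"

// roundTrip shows the store calls used by container_server.go below.
// The memory store is safe for concurrent use, so no external lock is
// taken around these operations.
func roundTrip(sb *sandbox.Sandbox) bool {
	store := sandbox.NewMemoryStore()
	store.Add(sb.ID(), sb)
	defer store.Delete(sb.ID())
	return store.Get(sb.ID()) != nil && len(store.List()) == 1
}
```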
diff --git a/libkpod/container_server.go b/lib/container_server.go
similarity index 91%
rename from libkpod/container_server.go
rename to lib/container_server.go
index 20680ffc..9a4704b7 100644
--- a/libkpod/container_server.go
+++ b/lib/container_server.go
@@ -1,4 +1,4 @@
-package libkpod
+package lib
 
 import (
 	"encoding/json"
@@ -12,13 +12,14 @@ import (
 	cstorage "github.com/containers/storage"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/truncindex"
-	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
+	"github.com/kubernetes-incubator/cri-o/lib/sandbox"
 	"github.com/kubernetes-incubator/cri-o/oci"
 	"github.com/kubernetes-incubator/cri-o/pkg/annotations"
 	"github.com/kubernetes-incubator/cri-o/pkg/registrar"
 	"github.com/kubernetes-incubator/cri-o/pkg/storage"
 	"github.com/opencontainers/runc/libcontainer"
 	rspec "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/opencontainers/selinux/go-selinux"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -121,7 +122,7 @@ func New(config *Config) (*ContainerServer, error) {
 		return nil, err
 	}
 
-	runtime, err := oci.New(config.Runtime, config.RuntimeUntrustedWorkload, config.DefaultWorkloadTrust, config.Conmon, config.ConmonEnv, config.CgroupManager, config.ContainerExitsDir)
+	runtime, err := oci.New(config.Runtime, config.RuntimeUntrustedWorkload, config.DefaultWorkloadTrust, config.Conmon, config.ConmonEnv, config.CgroupManager, config.ContainerExitsDir, config.LogSizeMax, config.NoPivot)
 	if err != nil {
 		return nil, err
 	}
@@ -167,7 +168,8 @@ func New(config *Config) (*ContainerServer, error) {
 		state: &containerServerState{
 			containers:      oci.NewMemoryStore(),
 			infraContainers: oci.NewMemoryStore(),
-			sandboxes:       make(map[string]*sandbox.Sandbox),
+			sandboxes:       sandbox.NewMemoryStore(),
+			processLevels:   make(map[string]int),
 		},
 		config: config,
 	}, nil
@@ -323,6 +325,8 @@ func (c *ContainerServer) LoadSandbox(id string) error {
 		return err
 	}
 
+	spp := m.Annotations[annotations.SeccompProfilePath]
+
 	kubeAnnotations := make(map[string]string)
 	if err = json.Unmarshal([]byte(m.Annotations[annotations.Annotations]), &kubeAnnotations); err != nil {
 		return err
@@ -335,7 +339,9 @@ func (c *ContainerServer) LoadSandbox(id string) error {
 	if err != nil {
 		return err
 	}
+	sb.AddHostnamePath(m.Annotations[annotations.HostnamePath])
 	sb.AddIP(ip)
+	sb.SetSeccompProfilePath(spp)
 
 	// We add a netNS only if we can load a permanent one.
 	// Otherwise, the sandbox will live in the host namespace.
@@ -383,10 +389,11 @@ func (c *ContainerServer) LoadSandbox(id string) error {
 		return err
 	}
 
-	scontainer, err := oci.NewContainer(m.Annotations[annotations.ContainerID], cname, sandboxPath, m.Annotations[annotations.LogPath], sb.NetNs(), labels, kubeAnnotations, "", "", "", nil, id, false, false, false, privileged, trusted, sandboxDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
+	scontainer, err := oci.NewContainer(m.Annotations[annotations.ContainerID], cname, sandboxPath, m.Annotations[annotations.LogPath], sb.NetNs(), labels, m.Annotations, kubeAnnotations, "", "", "", nil, id, false, false, false, privileged, trusted, sandboxDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
 	if err != nil {
 		return err
 	}
+	scontainer.SetSpec(&m)
 	scontainer.SetMountPoint(m.Annotations[annotations.MountPoint])
 
 	if m.Annotations[annotations.Volumes] != "" {
@@ -506,11 +513,14 @@ func (c *ContainerServer) LoadContainer(id string) error {
 		return err
 	}
 
-	ctr, err := oci.NewContainer(id, name, containerPath, m.Annotations[annotations.LogPath], sb.NetNs(), labels, kubeAnnotations, img, imgName, imgRef, &metadata, sb.ID(), tty, stdin, stdinOnce, sb.Privileged(), sb.Trusted(), containerDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
+	ctr, err := oci.NewContainer(id, name, containerPath, m.Annotations[annotations.LogPath], sb.NetNs(), labels, m.Annotations, kubeAnnotations, img, imgName, imgRef, &metadata, sb.ID(), tty, stdin, stdinOnce, sb.Privileged(), sb.Trusted(), containerDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
 	if err != nil {
 		return err
 	}
+	ctr.SetSpec(&m)
 	ctr.SetMountPoint(m.Annotations[annotations.MountPoint])
+	spp := m.Annotations[annotations.SeccompProfilePath]
+	ctr.SetSeccompProfilePath(spp)
 
 	c.ContainerStateFromDisk(ctr)
 
@@ -607,68 +617,53 @@ func (c *ContainerServer) Shutdown() error {
 type containerServerState struct {
 	containers      oci.ContainerStorer
 	infraContainers oci.ContainerStorer
-	sandboxes       map[string]*sandbox.Sandbox
+	sandboxes       sandbox.Storer
+	// processLevels tracks the number of sandboxes sharing the same SELinux
+	// MCS level; the level must be released when the count reaches 0
+	processLevels map[string]int
 }
 
 // AddContainer adds a container to the container state store
 func (c *ContainerServer) AddContainer(ctr *oci.Container) {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	sandbox := c.state.sandboxes[ctr.Sandbox()]
+	sandbox := c.state.sandboxes.Get(ctr.Sandbox())
 	sandbox.AddContainer(ctr)
 	c.state.containers.Add(ctr.ID(), ctr)
 }
 
 // AddInfraContainer adds a container to the container state store
func (c *ContainerServer) AddInfraContainer(ctr *oci.Container) {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
 	c.state.infraContainers.Add(ctr.ID(), ctr)
 }
 
 // GetContainer returns a container by its ID
 func (c *ContainerServer) GetContainer(id string) *oci.Container {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
 	return c.state.containers.Get(id)
 }
 
 // GetInfraContainer returns a container by its ID
 func (c *ContainerServer) GetInfraContainer(id string) *oci.Container {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
 	return c.state.infraContainers.Get(id)
 }
 
 // HasContainer checks if a container exists in the state
 func (c *ContainerServer) HasContainer(id string) bool {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	ctr := c.state.containers.Get(id)
-	return ctr != nil
+	return c.state.containers.Get(id) != nil
 }
 
 // RemoveContainer removes a container from the container state store
 func (c *ContainerServer) RemoveContainer(ctr *oci.Container) {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
 	sbID := ctr.Sandbox()
-	sb := c.state.sandboxes[sbID]
+	sb := c.state.sandboxes.Get(sbID)
 	sb.RemoveContainer(ctr)
 	c.state.containers.Delete(ctr.ID())
 }
 
 // RemoveInfraContainer removes a container from the container state store
 func (c *ContainerServer) RemoveInfraContainer(ctr *oci.Container) {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
 	c.state.infraContainers.Delete(ctr.ID())
 }
 
 // listContainers returns a list of all containers stored by the server state
 func (c *ContainerServer) listContainers() []*oci.Container {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
 	return c.state.containers.List()
 }
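The hunk below adds the `processLevels` refcounting: multiple sandboxes can share one SELinux MCS level, and `label.ReleaseLabel` must run only when the last sandbox using that level is removed, otherwise a still-in-use level could be handed out again. The same idea in isolation, under hypothetical names (`levelRefs`, `acquire`, `release`); the `selinux` and `label` calls are the ones the hunk itself makes:

```go
package labelref

import (
	"sync"

	"github.com/opencontainers/selinux/go-selinux"
	"github.com/opencontainers/selinux/go-selinux/label"
)

// levelRefs counts sandboxes per MCS level, mirroring processLevels.
type levelRefs struct {
	mu     sync.Mutex
	counts map[string]int
}

// acquire is the AddSandbox side: bump the count for the label's level.
func (r *levelRefs) acquire(processLabel string) {
	level := selinux.NewContext(processLabel)["level"]
	r.mu.Lock()
	r.counts[level]++
	r.mu.Unlock()
}

// release is the RemoveSandbox side: the SELinux label is released for
// reuse only when the last sandbox on this level goes away.
func (r *levelRefs) release(processLabel string) {
	level := selinux.NewContext(processLabel)["level"]
	r.mu.Lock()
	defer r.mu.Unlock()
	if n, ok := r.counts[level]; ok {
		if n <= 1 {
			label.ReleaseLabel(processLabel)
			delete(r.counts, level)
		} else {
			r.counts[level] = n - 1
		}
	}
}
```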
 // RemoveSandbox removes a sandbox from the state store
 func (c *ContainerServer) RemoveSandbox(id string) {
+	sb := c.state.sandboxes.Get(id)
+	processLabel := sb.ProcessLabel()
+	level := selinux.NewContext(processLabel)["level"]
+
 	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	delete(c.state.sandboxes, id)
+	pl, ok := c.state.processLevels[level]
+	if ok {
+		c.state.processLevels[level] = pl - 1
+		if c.state.processLevels[level] == 0 {
+			label.ReleaseLabel(processLabel)
+			delete(c.state.processLevels, level)
+		}
+	}
+	c.stateLock.Unlock()
+
+	c.state.sandboxes.Delete(id)
 }
 
 // ListSandboxes lists all sandboxes in the state store
 func (c *ContainerServer) ListSandboxes() []*sandbox.Sandbox {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	sbArray := make([]*sandbox.Sandbox, 0, len(c.state.sandboxes))
-	for _, sb := range c.state.sandboxes {
-		sbArray = append(sbArray, sb)
-	}
-
-	return sbArray
+	return c.state.sandboxes.List()
 }
 
 // LibcontainerStats gets the stats for the container with the given id from runc/libcontainer
diff --git a/libkpod/hooks.go b/lib/hooks.go
similarity index 98%
rename from libkpod/hooks.go
rename to lib/hooks.go
index f353cdcd..fab563f0 100644
--- a/libkpod/hooks.go
+++ b/lib/hooks.go
@@ -1,4 +1,4 @@
-package libkpod
+package lib
 
 import (
 	"encoding/json"
@@ -27,6 +27,7 @@ type HookParams struct {
 	Cmds          []string `json:"cmd"`
 	Annotations   []string `json:"annotation"`
 	HasBindMounts bool     `json:"hasbindmounts"`
+	Arguments     []string `json:"arguments"`
 }
 
 // readHook reads hooks json files, verifies it and returns the json config
diff --git a/lib/kill.go b/lib/kill.go
new file mode 100644
index 00000000..356932f1
--- /dev/null
+++ b/lib/kill.go
@@ -0,0 +1,45 @@
+package lib
+
+import (
+	"github.com/docker/docker/pkg/signal"
+	"github.com/kubernetes-incubator/cri-o/oci"
+	"github.com/kubernetes-incubator/cri-o/utils"
+	"github.com/pkg/errors"
+	"os"
+	"syscall"
+)
+
+// findStringInSignalMap reverse-looks-up a signal's string name in the signal map.
+func findStringInSignalMap(killSignal syscall.Signal) (string, error) {
+	for k, v := range signal.SignalMap {
+		if v == killSignal {
+			return k, nil
+		}
+	}
+	return "", errors.Errorf("unable to convert signal to string")
+}
+
+// ContainerKill sends the user-provided signal to the container's primary process.
+func (c *ContainerServer) ContainerKill(container string, killSignal syscall.Signal) (string, error) { // nolint
+	ctr, err := c.LookupContainer(container)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to find container %s", container)
+	}
+	c.runtime.UpdateStatus(ctr)
+	cStatus := c.runtime.ContainerStatus(ctr)
+
+	// If the container is not running, error and move on.
+	if cStatus.Status != oci.ContainerStateRunning {
+		return "", errors.Errorf("cannot kill container %s: it is not running", container)
+	}
+	signalString, err := findStringInSignalMap(killSignal)
+	if err != nil {
+		return "", err
+	}
+	if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, c.runtime.Path(ctr), "kill", ctr.ID(), signalString); err != nil {
+		return "", err
+	}
+	c.ContainerStateToDisk(ctr)
+	return ctr.ID(), nil
+}
diff --git a/libkpod/logs.go b/lib/logs.go
similarity index 99%
rename from libkpod/logs.go
rename to lib/logs.go
index 00b0f016..d287b153 100644
--- a/libkpod/logs.go
+++ b/lib/logs.go
@@ -1,4 +1,4 @@
-package libkpod
+package lib
 
 import (
 	"path"
diff --git a/lib/pause.go b/lib/pause.go
new file mode 100644
index 00000000..70087a3a
--- /dev/null
+++ b/lib/pause.go
@@ -0,0 +1,46 @@
+package lib
+
+import (
+	"github.com/kubernetes-incubator/cri-o/oci"
+	"github.com/pkg/errors"
+)
+
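The new lib/kill.go above has to translate a syscall.Signal back into the name that the runtime's `kill` subcommand expects, which it does by scanning docker's signal.SignalMap. A standalone sketch of that reverse lookup; the trimmed signalMap here is an assumption standing in for the real docker/pkg/signal table:

```
package main

import (
	"fmt"
	"syscall"
)

// signalMap is a trimmed stand-in for docker/pkg/signal.SignalMap,
// which maps signal names (without the SIG prefix) to their values.
var signalMap = map[string]syscall.Signal{
	"TERM": syscall.SIGTERM,
	"KILL": syscall.SIGKILL,
	"HUP":  syscall.SIGHUP,
}

// findStringInSignalMap reverse-looks-up the name for a signal value,
// mirroring the helper added in lib/kill.go.
func findStringInSignalMap(killSignal syscall.Signal) (string, error) {
	for name, sig := range signalMap {
		if sig == killSignal {
			return name, nil
		}
	}
	return "", fmt.Errorf("unable to convert signal to string")
}

func main() {
	name, err := findStringInSignalMap(syscall.SIGKILL)
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // KILL
}
```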
+// ContainerPause pauses a running container.
+func (c *ContainerServer) ContainerPause(container string) (string, error) {
+	ctr, err := c.LookupContainer(container)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to find container %s", container)
+	}
+
+	cStatus := c.runtime.ContainerStatus(ctr)
+	if cStatus.Status != oci.ContainerStatePaused {
+		if err := c.runtime.PauseContainer(ctr); err != nil {
+			return "", errors.Wrapf(err, "failed to pause container %s", ctr.ID())
+		}
+		c.ContainerStateToDisk(ctr)
+	} else {
+		return "", errors.Errorf("container %s is already paused", ctr.ID())
+	}
+
+	return ctr.ID(), nil
+}
+
+// ContainerUnpause unpauses a paused container.
+func (c *ContainerServer) ContainerUnpause(container string) (string, error) {
+	ctr, err := c.LookupContainer(container)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to find container %s", container)
+	}
+
+	cStatus := c.runtime.ContainerStatus(ctr)
+	if cStatus.Status == oci.ContainerStatePaused {
+		if err := c.runtime.UnpauseContainer(ctr); err != nil {
+			return "", errors.Wrapf(err, "failed to unpause container %s", ctr.ID())
+		}
+		c.ContainerStateToDisk(ctr)
+	} else {
+		return "", errors.Errorf("the container %s is not paused", ctr.ID())
+	}
+
+	return ctr.ID(), nil
+}
diff --git a/libkpod/remove.go b/lib/remove.go
similarity index 72%
rename from libkpod/remove.go
rename to lib/remove.go
index bfe4fef8..e020637f 100644
--- a/libkpod/remove.go
+++ b/lib/remove.go
@@ -1,4 +1,4 @@
-package libkpod
+package lib
 
 import (
 	"os"
@@ -6,20 +6,24 @@ import (
 
 	"github.com/kubernetes-incubator/cri-o/oci"
 	"github.com/pkg/errors"
+	"golang.org/x/net/context"
 )
 
 // Remove removes a container
-func (c *ContainerServer) Remove(container string, force bool) (string, error) {
+func (c *ContainerServer) Remove(ctx context.Context, container string, force bool) (string, error) {
 	ctr, err := c.LookupContainer(container)
 	if err != nil {
 		return "", err
 	}
 	ctrID := ctr.ID()
 
-	cState := c.runtime.ContainerStatus(ctr)
-	if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
+	cStatus := c.runtime.ContainerStatus(ctr)
+	switch cStatus.Status {
+	case oci.ContainerStatePaused:
+		return "", errors.Errorf("cannot remove paused container %s", ctrID)
+	case oci.ContainerStateCreated, oci.ContainerStateRunning:
 		if force {
-			_, err = c.ContainerStop(container, -1)
+			_, err = c.ContainerStop(ctx, container, 10)
 			if err != nil {
 				return "", errors.Wrapf(err, "unable to stop container %s", ctrID)
 			}
diff --git a/libkpod/rename.go b/lib/rename.go
similarity index 99%
rename from libkpod/rename.go
rename to lib/rename.go
index 7c0279bf..d03c3b13 100644
--- a/libkpod/rename.go
+++ b/lib/rename.go
@@ -1,4 +1,4 @@
-package libkpod
+package lib
 
 import (
 	"encoding/json"
diff --git a/lib/sandbox/history.go b/lib/sandbox/history.go
new file mode 100644
index 00000000..84d0291d
--- /dev/null
+++ b/lib/sandbox/history.go
@@ -0,0 +1,31 @@
+package sandbox
+
+import "sort"
+
+// History is a convenience type for storing a list of sandboxes,
+// sorted by creation date in descending order.
+type History []*Sandbox
+
+// Len returns the number of sandboxes in the history.
+func (history *History) Len() int {
+	return len(*history)
+}
+
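The History type above becomes sortable by implementing sort.Interface; its Less and Swap methods follow in the next hunk. The same newest-first idiom in a self-contained form (the `item` type here is hypothetical):

```
package main

import (
	"fmt"
	"sort"
	"time"
)

type item struct {
	name    string
	created time.Time
}

// byNewest sorts items by creation time, newest first: the same
// descending order History uses for sandboxes.
type byNewest []item

func (h byNewest) Len() int      { return len(h) }
func (h byNewest) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h byNewest) Less(i, j int) bool {
	// "j created before i" means i is newer, so newer items sort first.
	return h[j].created.Before(h[i].created)
}

func main() {
	now := time.Now()
	items := []item{
		{"old", now.Add(-2 * time.Hour)},
		{"new", now},
		{"mid", now.Add(-1 * time.Hour)},
	}
	sort.Sort(byNewest(items))
	fmt.Println(items[0].name, items[1].name, items[2].name) // new mid old
}
```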
+// Less compares two sandboxes and returns true if the second one
+// was created before the first one.
+func (history *History) Less(i, j int) bool {
+	sandboxes := *history
+	// FIXME: state access should be serialized
+	return sandboxes[j].created.Before(sandboxes[i].created)
+}
+
+// Swap switches sandboxes i and j positions in the history.
+func (history *History) Swap(i, j int) {
+	sandboxes := *history
+	sandboxes[i], sandboxes[j] = sandboxes[j], sandboxes[i]
+}
+
+// sort orders the history by creation date in descending order.
+func (history *History) sort() {
+	sort.Sort(history)
+}
diff --git a/lib/sandbox/memory_store.go b/lib/sandbox/memory_store.go
new file mode 100644
index 00000000..17533bf7
--- /dev/null
+++ b/lib/sandbox/memory_store.go
@@ -0,0 +1,93 @@
+package sandbox
+
+import "sync"
+
+// memoryStore implements a Store in memory.
+type memoryStore struct {
+	s map[string]*Sandbox
+	sync.RWMutex
+}
+
+// NewMemoryStore initializes a new memory store.
+func NewMemoryStore() Storer {
+	return &memoryStore{
+		s: make(map[string]*Sandbox),
+	}
+}
+
+// Add appends a new sandbox to the memory store.
+// It overwrites the entry if the id already exists.
+func (c *memoryStore) Add(id string, cont *Sandbox) {
+	c.Lock()
+	c.s[id] = cont
+	c.Unlock()
+}
+
+// Get returns a sandbox from the store by id.
+func (c *memoryStore) Get(id string) *Sandbox {
+	var res *Sandbox
+	c.RLock()
+	res = c.s[id]
+	c.RUnlock()
+	return res
+}
+
+// Delete removes a sandbox from the store by id.
+func (c *memoryStore) Delete(id string) {
+	c.Lock()
+	delete(c.s, id)
+	c.Unlock()
+}
+
+// List returns a sorted list of sandboxes from the store.
+// The sandboxes are ordered by creation date.
+func (c *memoryStore) List() []*Sandbox {
+	sandboxes := History(c.all())
+	sandboxes.sort()
+	return sandboxes
+}
+
+// Size returns the number of sandboxes in the store.
+func (c *memoryStore) Size() int {
+	c.RLock()
+	defer c.RUnlock()
+	return len(c.s)
+}
+
+// First returns the first sandbox found in the store by a given filter.
+func (c *memoryStore) First(filter StoreFilter) *Sandbox {
+	for _, cont := range c.all() {
+		if filter(cont) {
+			return cont
+		}
+	}
+	return nil
+}
+
+// ApplyAll calls the reducer function with every sandbox in the store.
+// This operation is asynchronous in the memory store.
+// NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
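The ApplyAll implementation that follows runs the reducer in one goroutine per sandbox and waits on a sync.WaitGroup, which is why the NOTE above forbids reducers from mutating the store. A minimal sketch of that fan-out-and-wait pattern:

```
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// applyAll runs fn concurrently for every element and blocks until all
// invocations finish, mirroring memoryStore.ApplyAll.
func applyAll(items []string, fn func(string)) {
	var wg sync.WaitGroup
	for _, it := range items {
		wg.Add(1)
		go func(s string) {
			defer wg.Done()
			fn(s)
		}(it)
	}
	wg.Wait()
}

func main() {
	var n int64
	applyAll([]string{"a", "b", "c"}, func(s string) {
		// The reducer must be safe to run concurrently and must not
		// touch the store itself.
		atomic.AddInt64(&n, 1)
	})
	fmt.Println(n) // 3
}
```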
+func (c *memoryStore) ApplyAll(apply StoreReducer) { + wg := new(sync.WaitGroup) + for _, cont := range c.all() { + wg.Add(1) + go func(sandbox *Sandbox) { + apply(sandbox) + wg.Done() + }(cont) + } + + wg.Wait() +} + +func (c *memoryStore) all() []*Sandbox { + c.RLock() + sandboxes := make([]*Sandbox, 0, len(c.s)) + for _, cont := range c.s { + sandboxes = append(sandboxes, cont) + } + c.RUnlock() + return sandboxes +} + +var _ Storer = &memoryStore{} diff --git a/libkpod/sandbox/sandbox.go b/lib/sandbox/sandbox.go similarity index 94% rename from libkpod/sandbox/sandbox.go rename to lib/sandbox/sandbox.go index 8e84dfb2..7624b072 100644 --- a/libkpod/sandbox/sandbox.go +++ b/lib/sandbox/sandbox.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "sync" + "time" "github.com/containernetworking/plugins/pkg/ns" "github.com/docker/docker/pkg/mount" @@ -151,11 +152,14 @@ type Sandbox struct { privileged bool trusted bool resolvPath string + hostnamePath string hostname string portMappings []*hostport.PortMapping stopped bool // ipv4 or ipv6 cache - ip string + ip string + seccompProfilePath string + created time.Time } const ( @@ -200,10 +204,21 @@ func New(id, namespace, name, kubeName, logDir string, labels, annotations map[s sb.resolvPath = resolvPath sb.hostname = hostname sb.portMappings = portMappings + sb.created = time.Now() return sb, nil } +// SetSeccompProfilePath sets the seccomp profile path +func (s *Sandbox) SetSeccompProfilePath(pp string) { + s.seccompProfilePath = pp +} + +// SeccompProfilePath returns the seccomp profile path +func (s *Sandbox) SeccompProfilePath() string { + return s.seccompProfilePath +} + // AddIP stores the ip in the sandbox func (s *Sandbox) AddIP(ip string) { s.ip = ip @@ -301,6 +316,16 @@ func (s *Sandbox) ResolvPath() string { return s.resolvPath } +// AddHostnamePath adds the hostname path to the sandbox +func (s *Sandbox) AddHostnamePath(hostname string) { + s.hostnamePath = hostname +} + +// HostnamePath retrieves the hostname path from a sandbox +func (s *Sandbox) HostnamePath() string { + return s.hostnamePath +} + // Hostname returns the hsotname of the sandbox func (s *Sandbox) Hostname() string { return s.hostname diff --git a/lib/sandbox/store.go b/lib/sandbox/store.go new file mode 100644 index 00000000..83d705cd --- /dev/null +++ b/lib/sandbox/store.go @@ -0,0 +1,27 @@ +package sandbox + +// StoreFilter defines a function to filter +// sandboxes in the store. +type StoreFilter func(*Sandbox) bool + +// StoreReducer defines a function to +// manipulate sandboxes in the store +type StoreReducer func(*Sandbox) + +// Storer defines an interface that any container store must implement. +type Storer interface { + // Add appends a new sandbox to the store. + Add(string, *Sandbox) + // Get returns a sandbox from the store by the identifier it was stored with. + Get(string) *Sandbox + // Delete removes a sandbox from the store by the identifier it was stored with. + Delete(string) + // List returns a list of sandboxes from the store. + List() []*Sandbox + // Size returns the number of sandboxes in the store. + Size() int + // First returns the first sandbox found in the store by a given filter. + First(StoreFilter) *Sandbox + // ApplyAll calls the reducer function with every sandbox in the store. 
+	ApplyAll(StoreReducer)
+}
diff --git a/libkpod/stats.go b/lib/stats.go
similarity index 99%
rename from libkpod/stats.go
rename to lib/stats.go
index f4d645d6..229d8409 100644
--- a/libkpod/stats.go
+++ b/lib/stats.go
@@ -1,4 +1,4 @@
-package libkpod
+package lib
 
 import (
 	"path/filepath"
diff --git a/lib/stop.go b/lib/stop.go
new file mode 100644
index 00000000..7dbbd066
--- /dev/null
+++ b/lib/stop.go
@@ -0,0 +1,36 @@
+package lib
+
+import (
+	"github.com/kubernetes-incubator/cri-o/oci"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+// ContainerStop stops a running container with a grace period (i.e., timeout).
+func (c *ContainerServer) ContainerStop(ctx context.Context, container string, timeout int64) (string, error) {
+	ctr, err := c.LookupContainer(container)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to find container %s", container)
+	}
+	ctrID := ctr.ID()
+
+	cStatus := c.runtime.ContainerStatus(ctr)
+	switch cStatus.Status {
+	case oci.ContainerStatePaused:
+		return "", errors.Errorf("cannot stop paused container %s", ctrID)
+	default:
+		if cStatus.Status != oci.ContainerStateStopped {
+			if err := c.runtime.StopContainer(ctx, ctr, timeout); err != nil {
+				return "", errors.Wrapf(err, "failed to stop container %s", ctrID)
+			}
+			if err := c.storageRuntimeServer.StopContainer(ctrID); err != nil {
+				return "", errors.Wrapf(err, "failed to unmount container %s", ctrID)
+			}
+		}
+	}
+
+	c.ContainerStateToDisk(ctr)
+
+	return ctrID, nil
+}
diff --git a/lib/testdata/config.toml b/lib/testdata/config.toml
new file mode 100644
index 00000000..31827367
--- /dev/null
+++ b/lib/testdata/config.toml
@@ -0,0 +1,28 @@
+[crio]
+  root = "/var/lib/containers/storage"
+  runroot = "/var/run/containers/storage"
+  storage_driver = "overlay2"
+  log_dir = "/var/log/crio/pods"
+  file_locking = true
+  [crio.runtime]
+    runtime = "/usr/bin/runc"
+    runtime_untrusted_workload = ""
+    default_workload_trust = "trusted"
+    conmon = "/usr/local/libexec/crio/conmon"
+    conmon_env = ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
+    selinux = true
+    seccomp_profile = "/etc/crio/seccomp.json"
+    apparmor_profile = "crio-default"
+    cgroup_manager = "cgroupfs"
+    hooks_dir_path = "/usr/share/containers/oci/hooks.d"
+    pids_limit = 2048
+    container_exits_dir = "/var/run/kpod/exits"
+  [crio.image]
+    default_transport = "docker://"
+    pause_image = "kubernetes/pause"
+    pause_command = "/pause"
+    signature_policy = ""
+    image_volumes = "mkdir"
+  [crio.network]
+    network_dir = "/etc/cni/net.d/"
+    plugin_dir = "/opt/cni/bin/"
diff --git a/lib/wait.go b/lib/wait.go
new file mode 100644
index 00000000..c7b84c04
--- /dev/null
+++ b/lib/wait.go
@@ -0,0 +1,42 @@
+package lib
+
+import (
+	"github.com/kubernetes-incubator/cri-o/oci"
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/util/wait"
+)
+
+func isStopped(c *ContainerServer, ctr *oci.Container) bool {
+	c.runtime.UpdateStatus(ctr)
+	cStatus := c.runtime.ContainerStatus(ctr)
+	return cStatus.Status == oci.ContainerStateStopped
+}
+
+// ContainerWait waits for a container to stop and returns its exit code.
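Before the implementation that follows: ContainerWait is built on apimachinery's wait.PollImmediateInfinite, which runs the condition immediately and then on every tick until it reports done or errors. A stdlib-only approximation of that helper, assuming the same condition-function signature:

```
package main

import (
	"fmt"
	"time"
)

// pollImmediateInfinite is a stdlib stand-in for the apimachinery helper
// used by ContainerWait: run the condition right away, then once per
// interval, until it returns true or an error.
func pollImmediateInfinite(interval time.Duration, condition func() (bool, error)) error {
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
}

func main() {
	deadline := time.Now().Add(50 * time.Millisecond)
	err := pollImmediateInfinite(10*time.Millisecond, func() (bool, error) {
		// Stand-in for isStopped(c, ctr).
		return time.Now().After(deadline), nil
	})
	fmt.Println("stopped:", err == nil)
}
```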
+func (c *ContainerServer) ContainerWait(container string) (int32, error) { + ctr, err := c.LookupContainer(container) + if err != nil { + return 0, errors.Wrapf(err, "failed to find container %s", container) + } + + err = wait.PollImmediateInfinite(1, + func() (bool, error) { + if !isStopped(c, ctr) { + return false, nil + } else { // nolint + return true, nil // nolint + } // nolint + + }, + ) + + if err != nil { + return 0, err + } + exitCode := ctr.State().ExitCode + c.ContainerStateToDisk(ctr) + return exitCode, nil +} diff --git a/libkpod/container_data.go b/libkpod/container_data.go deleted file mode 100644 index 21843d40..00000000 --- a/libkpod/container_data.go +++ /dev/null @@ -1,178 +0,0 @@ -package libkpod - -import ( - "encoding/json" - "os" - - "k8s.io/apimachinery/pkg/fields" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" - - "github.com/kubernetes-incubator/cri-o/libpod/driver" - "github.com/kubernetes-incubator/cri-o/libpod/images" - "github.com/kubernetes-incubator/cri-o/oci" - "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -// ContainerData handles the data used when inspecting a container -type ContainerData struct { - ID string - Name string - LogPath string - Labels fields.Set - Annotations fields.Set - State *oci.ContainerState - Metadata *pb.ContainerMetadata - BundlePath string - StopSignal string - FromImage string `json:"Image,omitempty"` - FromImageID string `json:"ImageID"` - MountPoint string `json:"Mountpoint,omitempty"` - MountLabel string - Mounts []specs.Mount - AppArmorProfile string - ImageAnnotations map[string]string `json:"Annotations,omitempty"` - ImageCreatedBy string `json:"CreatedBy,omitempty"` - Config v1.ImageConfig `json:"Config,omitempty"` - SizeRw uint `json:"SizeRw,omitempty"` - SizeRootFs uint `json:"SizeRootFs,omitempty"` - Args []string - ResolvConfPath string - HostnamePath string - HostsPath string - GraphDriver driverData -} - -type driverData struct { - Name string - Data map[string]string -} - -// GetContainerData gets the ContainerData for a container with the given name in the given store. -// If size is set to true, it will also determine the size of the container -func (c *ContainerServer) GetContainerData(name string, size bool) (*ContainerData, error) { - ctr, err := c.inspectContainer(name) - if err != nil { - return nil, errors.Wrapf(err, "error reading build container %q", name) - } - container, err := c.store.Container(name) - if err != nil { - return nil, errors.Wrapf(err, "error reading container data") - } - - // The runtime configuration won't exist if the container has never been started by cri-o or kpod, - // so treat a not-exist error as non-fatal. 
- m := getBlankSpec() - config, err := c.store.FromContainerDirectory(ctr.ID(), "config.json") - if err != nil && !os.IsNotExist(errors.Cause(err)) { - return nil, err - } - if len(config) > 0 { - if err = json.Unmarshal(config, &m); err != nil { - return nil, err - } - } - - if container.ImageID == "" { - return nil, errors.Errorf("error reading container image data: container is not based on an image") - } - imageData, err := images.GetData(c.store, container.ImageID) - if err != nil { - return nil, errors.Wrapf(err, "error reading container image data") - } - - driverName, err := driver.GetDriverName(c.store) - if err != nil { - return nil, err - } - topLayer, err := c.GetContainerTopLayerID(ctr.ID()) - if err != nil { - return nil, err - } - layer, err := c.store.Layer(topLayer) - if err != nil { - return nil, err - } - driverMetadata, err := driver.GetDriverMetadata(c.store, topLayer) - if err != nil { - return nil, err - } - imageName := "" - if len(imageData.Tags) > 0 { - imageName = imageData.Tags[0] - } else if len(imageData.Digests) > 0 { - imageName = imageData.Digests[0] - } - data := &ContainerData{ - ID: ctr.ID(), - Name: ctr.Name(), - LogPath: ctr.LogPath(), - Labels: ctr.Labels(), - Annotations: ctr.Annotations(), - State: ctr.State(), - Metadata: ctr.Metadata(), - BundlePath: ctr.BundlePath(), - StopSignal: ctr.GetStopSignal(), - Args: m.Process.Args, - FromImage: imageName, - FromImageID: container.ImageID, - MountPoint: layer.MountPoint, - ImageAnnotations: imageData.Annotations, - ImageCreatedBy: imageData.CreatedBy, - Config: imageData.Config, - GraphDriver: driverData{ - Name: driverName, - Data: driverMetadata, - }, - MountLabel: m.Linux.MountLabel, - Mounts: m.Mounts, - AppArmorProfile: m.Process.ApparmorProfile, - ResolvConfPath: "", - HostnamePath: "", - HostsPath: "", - } - - if size { - sizeRootFs, err := c.GetContainerRootFsSize(data.ID) - if err != nil { - - return nil, errors.Wrapf(err, "error reading size for container %q", name) - } - data.SizeRootFs = uint(sizeRootFs) - sizeRw, err := c.GetContainerRwSize(data.ID) - if err != nil { - return nil, errors.Wrapf(err, "error reading RWSize for container %q", name) - } - data.SizeRw = uint(sizeRw) - } - - return data, nil -} - -// Get an oci.Container and update its status -func (c *ContainerServer) inspectContainer(container string) (*oci.Container, error) { - ociCtr, err := c.LookupContainer(container) - if err != nil { - return nil, err - } - // call runtime.UpdateStatus() - err = c.Runtime().UpdateStatus(ociCtr) - if err != nil { - return nil, err - } - return ociCtr, nil -} - -func getBlankSpec() specs.Spec { - return specs.Spec{ - Process: &specs.Process{}, - Root: &specs.Root{}, - Mounts: []specs.Mount{}, - Hooks: &specs.Hooks{}, - Annotations: make(map[string]string), - Linux: &specs.Linux{}, - Solaris: &specs.Solaris{}, - Windows: &specs.Windows{}, - } -} diff --git a/libkpod/diff.go b/libkpod/diff.go deleted file mode 100644 index 281592c8..00000000 --- a/libkpod/diff.go +++ /dev/null @@ -1,54 +0,0 @@ -package libkpod - -import ( - "github.com/containers/storage/pkg/archive" - "github.com/kubernetes-incubator/cri-o/libpod/images" - "github.com/kubernetes-incubator/cri-o/libpod/layers" - "github.com/pkg/errors" -) - -// GetDiff returns the differences between the two images, layers, or containers -func (c *ContainerServer) GetDiff(from, to string) ([]archive.Change, error) { - toLayer, err := c.getLayerID(to) - if err != nil { - return nil, err - } - fromLayer := "" - if from != "" { - fromLayer, err = 
c.getLayerID(from) - if err != nil { - return nil, err - } - } - return c.Store().Changes(fromLayer, toLayer) -} - -// GetLayerID gets a full layer id given a full or partial id -// If the id matches a container or image, the id of the top layer is returned -// If the id matches a layer, the top layer id is returned -func (c *ContainerServer) getLayerID(id string) (string, error) { - var toLayer string - toImage, err := images.FindImage(c.store, id) - if err != nil { - toCtr, err := c.store.Container(id) - if err != nil { - toLayer, err = layers.FullID(c.store, id) - if err != nil { - return "", errors.Errorf("layer, image, or container %s does not exist", id) - } - } else { - toLayer = toCtr.LayerID - } - } else { - toLayer = toImage.TopLayer - } - return toLayer, nil -} - -func (c *ContainerServer) getLayerParent(layerID string) (string, error) { - layer, err := c.store.Layer(layerID) - if err != nil { - return "", err - } - return layer.Parent, nil -} diff --git a/libkpod/stop.go b/libkpod/stop.go deleted file mode 100644 index af7a8c5d..00000000 --- a/libkpod/stop.go +++ /dev/null @@ -1,28 +0,0 @@ -package libkpod - -import ( - "github.com/kubernetes-incubator/cri-o/oci" - "github.com/pkg/errors" -) - -// ContainerStop stops a running container with a grace period (i.e., timeout). -func (c *ContainerServer) ContainerStop(container string, timeout int64) (string, error) { - ctr, err := c.LookupContainer(container) - if err != nil { - return "", errors.Wrapf(err, "failed to find container %s", container) - } - - cStatus := c.runtime.ContainerStatus(ctr) - if cStatus.Status != oci.ContainerStateStopped { - if err := c.runtime.StopContainer(ctr, timeout); err != nil { - return "", errors.Wrapf(err, "failed to stop container %s", ctr.ID()) - } - if err := c.storageRuntimeServer.StopContainer(ctr.ID()); err != nil { - return "", errors.Wrapf(err, "failed to unmount container %s", ctr.ID()) - } - } - - c.ContainerStateToDisk(ctr) - - return ctr.ID(), nil -} diff --git a/libpod/common/common.go b/libpod/common/common.go deleted file mode 100644 index ac75a7a7..00000000 --- a/libpod/common/common.go +++ /dev/null @@ -1,98 +0,0 @@ -package common - -import ( - "io" - "strings" - "syscall" - - cp "github.com/containers/image/copy" - "github.com/containers/image/signature" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -var ( - // ErrNoPassword is returned if the user did not supply a password - ErrNoPassword = errors.Wrapf(syscall.EINVAL, "password was not supplied") -) - -// GetCopyOptions constructs a new containers/image/copy.Options{} struct from the given parameters -func GetCopyOptions(reportWriter io.Writer, signaturePolicyPath string, srcDockerRegistry, destDockerRegistry *DockerRegistryOptions, signing SigningOptions) *cp.Options { - if srcDockerRegistry == nil { - srcDockerRegistry = &DockerRegistryOptions{} - } - if destDockerRegistry == nil { - destDockerRegistry = &DockerRegistryOptions{} - } - srcContext := srcDockerRegistry.GetSystemContext(signaturePolicyPath) - destContext := destDockerRegistry.GetSystemContext(signaturePolicyPath) - return &cp.Options{ - RemoveSignatures: signing.RemoveSignatures, - SignBy: signing.SignBy, - ReportWriter: reportWriter, - SourceCtx: srcContext, - DestinationCtx: destContext, - } -} - -// GetSystemContext Constructs a new containers/image/types.SystemContext{} struct from the given signaturePolicy path -func GetSystemContext(signaturePolicyPath string) *types.SystemContext { - sc := &types.SystemContext{} - if 
signaturePolicyPath != "" { - sc.SignaturePolicyPath = signaturePolicyPath - } - return sc -} - -// CopyStringStringMap deep copies a map[string]string and returns the result -func CopyStringStringMap(m map[string]string) map[string]string { - n := map[string]string{} - for k, v := range m { - n[k] = v - } - return n -} - -// IsTrue determines whether the given string equals "true" -func IsTrue(str string) bool { - return str == "true" -} - -// IsFalse determines whether the given string equals "false" -func IsFalse(str string) bool { - return str == "false" -} - -// IsValidBool determines whether the given string equals "true" or "false" -func IsValidBool(str string) bool { - return IsTrue(str) || IsFalse(str) -} - -// GetPolicyContext creates a signature policy context for the given signature policy path -func GetPolicyContext(path string) (*signature.PolicyContext, error) { - policy, err := signature.DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path}) - if err != nil { - return nil, err - } - return signature.NewPolicyContext(policy) -} - -// ParseRegistryCreds takes a credentials string in the form USERNAME:PASSWORD -// and returns a DockerAuthConfig -func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) { - if creds == "" { - return nil, errors.New("no credentials supplied") - } - if !strings.Contains(creds, ":") { - return &types.DockerAuthConfig{ - Username: creds, - Password: "", - }, ErrNoPassword - } - v := strings.SplitN(creds, ":", 2) - cfg := &types.DockerAuthConfig{ - Username: v[0], - Password: v[1], - } - return cfg, nil -} diff --git a/libpod/common/docker_registry_options.go b/libpod/common/docker_registry_options.go deleted file mode 100644 index fdbaa059..00000000 --- a/libpod/common/docker_registry_options.go +++ /dev/null @@ -1,33 +0,0 @@ -package common - -import "github.com/containers/image/types" - -// DockerRegistryOptions encapsulates settings that affect how we connect or -// authenticate to a remote registry. -type DockerRegistryOptions struct { - // DockerRegistryCreds is the user name and password to supply in case - // we need to pull an image from a registry, and it requires us to - // authenticate. - DockerRegistryCreds *types.DockerAuthConfig - // DockerCertPath is the location of a directory containing CA - // certificates which will be used to verify the registry's certificate - // (all files with names ending in ".crt"), and possibly client - // certificates and private keys (pairs of files with the same name, - // except for ".cert" and ".key" suffixes). - DockerCertPath string - // DockerInsecureSkipTLSVerify turns off verification of TLS - // certificates and allows connecting to registries without encryption. 
- DockerInsecureSkipTLSVerify bool -} - -// GetSystemContext constructs a new system context from the given signaturePolicy path and the -// values in the DockerRegistryOptions -func (o DockerRegistryOptions) GetSystemContext(signaturePolicyPath string) *types.SystemContext { - sc := &types.SystemContext{ - SignaturePolicyPath: signaturePolicyPath, - DockerAuthConfig: o.DockerRegistryCreds, - DockerCertPath: o.DockerCertPath, - DockerInsecureSkipTLSVerify: o.DockerInsecureSkipTLSVerify, - } - return sc -} diff --git a/libpod/common/output_interfaces.go b/libpod/common/output_interfaces.go deleted file mode 100644 index 805d0c79..00000000 --- a/libpod/common/output_interfaces.go +++ /dev/null @@ -1 +0,0 @@ -package common diff --git a/libpod/common/signing_options.go b/libpod/common/signing_options.go deleted file mode 100644 index b7e14be8..00000000 --- a/libpod/common/signing_options.go +++ /dev/null @@ -1,10 +0,0 @@ -package common - -// SigningOptions encapsulates settings that control whether or not we strip or -// add signatures to images when writing them. -type SigningOptions struct { - // RemoveSignatures directs us to remove any signatures which are already present. - RemoveSignatures bool - // SignBy is a key identifier of some kind, indicating that a signature should be generated using the specified private key and stored with the image. - SignBy string -} diff --git a/libpod/ctr/container.go b/libpod/ctr/container.go deleted file mode 100644 index 3e5ed146..00000000 --- a/libpod/ctr/container.go +++ /dev/null @@ -1,77 +0,0 @@ -package ctr - -import ( - "fmt" - - "github.com/containers/storage" -) - -var ( - // ErrNotImplemented indicates that functionality is not yet implemented - ErrNotImplemented = fmt.Errorf("NOT IMPLEMENTED") -) - -// Container is a single OCI container -type Container struct { - // TODO populate -} - -// Create creates a container in the OCI runtime -func (c *Container) Create() error { - return ErrNotImplemented -} - -// Start starts a container -func (c *Container) Start() error { - return ErrNotImplemented -} - -// Stop stops a container -func (c *Container) Stop() error { - return ErrNotImplemented -} - -// Kill sends a signal to a container -func (c *Container) Kill(signal uint) error { - return ErrNotImplemented -} - -// Exec starts a new process inside the container -// Returns fully qualified URL of streaming server for executed process -func (c *Container) Exec(cmd []string, tty bool, stdin bool) (string, error) { - return "", ErrNotImplemented -} - -// Attach attaches to a container -// Returns fully qualified URL of streaming server for the container -func (c *Container) Attach(stdin, tty bool) (string, error) { - return "", ErrNotImplemented -} - -// Mount mounts a container's filesystem on the host -// The path where the container has been mounted is returned -func (c *Container) Mount() (string, error) { - return "", ErrNotImplemented -} - -// Status gets a container's status -// TODO this should return relevant information about container state -func (c *Container) Status() error { - return ErrNotImplemented -} - -// Export exports a container's root filesystem as a tar archive -// The archive will be saved as a file at the given path -func (c *Container) Export(path string) error { - return ErrNotImplemented -} - -// Commit commits the changes between a container and its image, creating a new -// image -// If the container was not created from an image (for example, -// WithRootFSFromPath will create a container from a directory on the 
system), -// a new base image will be created from the contents of the container's -// filesystem -func (c *Container) Commit() (*storage.Image, error) { - return nil, ErrNotImplemented -} diff --git a/libpod/driver/driver.go b/libpod/driver/driver.go deleted file mode 100644 index 4db55852..00000000 --- a/libpod/driver/driver.go +++ /dev/null @@ -1,27 +0,0 @@ -package driver - -import cstorage "github.com/containers/storage" - -// Data handles the data for a storage driver -type Data struct { - Name string - Data map[string]string -} - -// GetDriverName returns the name of the driver for the given store -func GetDriverName(store cstorage.Store) (string, error) { - driver, err := store.GraphDriver() - if err != nil { - return "", err - } - return driver.String(), nil -} - -// GetDriverMetadata returns the metadata regarding the driver for the layer in the given store -func GetDriverMetadata(store cstorage.Store, layerID string) (map[string]string, error) { - driver, err := store.GraphDriver() - if err != nil { - return nil, err - } - return driver.Metadata(layerID) -} diff --git a/libpod/image.go b/libpod/image.go deleted file mode 100644 index cbc52d49..00000000 --- a/libpod/image.go +++ /dev/null @@ -1,218 +0,0 @@ -package libpod - -import ( - "io" - "strings" - "syscall" - - cp "github.com/containers/image/copy" - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/manifest" - "github.com/containers/image/signature" - is "github.com/containers/image/storage" - "github.com/containers/image/transports/alltransports" - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/kubernetes-incubator/cri-o/libpod/common" - "github.com/kubernetes-incubator/cri-o/libpod/ctr" - "github.com/kubernetes-incubator/cri-o/libpod/images" - "github.com/pkg/errors" -) - -// Runtime API - -const ( - // DefaultRegistry is a prefix that we apply to an image name - // to check docker hub first for the image - DefaultRegistry = "docker://" -) - -// CopyOptions contains the options given when pushing or pulling images -type CopyOptions struct { - // Compression specifies the type of compression which is applied to - // layer blobs. The default is to not use compression, but - // archive.Gzip is recommended. - Compression archive.Compression - // DockerRegistryOptions encapsulates settings that affect how we - // connect or authenticate to a remote registry to which we want to - // push the image. - common.DockerRegistryOptions - // SigningOptions encapsulates settings that control whether or not we - // strip or add signatures to the image when pushing (uploading) the - // image to a registry. - common.SigningOptions -} - -// Image API - -// ImageFilter is a function to determine whether an image is included in -// command output. Images to be outputted are tested using the function. A true -// return will include the image, a false return will exclude it. -type ImageFilter func(*storage.Image) bool - -// PullImage pulls an image from configured registries -// By default, only the latest tag (or a specific tag if requested) will be -// pulled. If allTags is true, all tags for the requested image will be pulled. 
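An aside on the PullImage being deleted below: it sniffed the transport by splitting the image name on ":" (e.g. a docker-archive path versus a plain registry name), falling back to a docker:// prefix when the bare name did not parse. A self-contained sketch of that dispatch; `transportOf` is a hypothetical helper, not API from containers/image:

```
package main

import (
	"fmt"
	"strings"
)

// transportOf mimics how the removed PullImage sniffed a transport from
// an image name: everything before the first colon is treated as the
// transport, e.g. "docker-archive:/tmp/img.tar". Note that a bare
// "name:tag" also splits here, just as in the original code.
func transportOf(imgName string) (transport, rest string) {
	parts := strings.SplitN(imgName, ":", 2)
	if len(parts) == 1 {
		return "", imgName // bare name: caller prepends the default registry prefix
	}
	return parts[0], parts[1]
}

func main() {
	for _, name := range []string{"docker-archive:/tmp/img.tar", "alpine"} {
		tr, rest := transportOf(name)
		if tr == "" {
			fmt.Println("default:", "docker://"+rest)
			continue
		}
		fmt.Println(tr, "->", rest)
	}
}
```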
-// Signature validation will be performed if the Runtime has been appropriately -// configured -func (r *Runtime) PullImage(imgName string, allTags bool, reportWriter io.Writer) error { - // PullImage copies the image from the source to the destination - var ( - images []string - ) - - srcRef, err := alltransports.ParseImageName(imgName) - if err != nil { - defaultName := DefaultRegistry + imgName - srcRef2, err2 := alltransports.ParseImageName(defaultName) - if err2 != nil { - return errors.Errorf("error parsing image name %q: %v", defaultName, err2) - } - srcRef = srcRef2 - } - - splitArr := strings.Split(imgName, ":") - - // supports pulling from docker-archive, oci, and registries - if splitArr[0] == "docker-archive" { - tarSource := tarfile.NewSource(splitArr[len(splitArr)-1]) - manifest, err := tarSource.LoadTarManifest() - if err != nil { - return errors.Errorf("error retrieving manifest.json: %v", err) - } - // to pull all the images stored in one tar file - for i := range manifest { - if manifest[i].RepoTags != nil { - images = append(images, manifest[i].RepoTags[0]) - } else { - // create an image object and use the hex value of the digest as the image ID - // for parsing the store reference - newImg, err := srcRef.NewImage(r.imageContext) - if err != nil { - return err - } - defer newImg.Close() - digest := newImg.ConfigInfo().Digest - if err := digest.Validate(); err == nil { - images = append(images, "@"+digest.Hex()) - } else { - return errors.Wrapf(err, "error getting config info") - } - } - } - } else if splitArr[0] == "oci" { - // needs to be implemented in future - return errors.Errorf("oci not supported") - } else { - images = append(images, imgName) - } - - policy, err := signature.DefaultPolicy(r.imageContext) - if err != nil { - return err - } - - policyContext, err := signature.NewPolicyContext(policy) - if err != nil { - return err - } - defer policyContext.Destroy() - - copyOptions := common.GetCopyOptions(reportWriter, "", nil, nil, common.SigningOptions{}) - - for _, image := range images { - destRef, err := is.Transport.ParseStoreReference(r.store, image) - if err != nil { - return errors.Errorf("error parsing dest reference name: %v", err) - } - if err = cp.Image(policyContext, destRef, srcRef, copyOptions); err != nil { - return errors.Errorf("error loading image %q: %v", image, err) - } - } - return nil -} - -// PushImage pushes the given image to a location described by the given path -func (r *Runtime) PushImage(source string, destination string, options CopyOptions, reportWriter io.Writer) error { - // PushImage pushes the src image to the destination - //func PushImage(source, destination string, options CopyOptions) error { - if source == "" || destination == "" { - return errors.Wrapf(syscall.EINVAL, "source and destination image names must be specified") - } - - // Get the destination Image Reference - dest, err := alltransports.ParseImageName(destination) - if err != nil { - return errors.Wrapf(err, "error getting destination imageReference for %q", destination) - } - - policyContext, err := common.GetPolicyContext(r.GetConfig().SignaturePolicyPath) - if err != nil { - return errors.Wrapf(err, "Could not get default policy context for signature policy path %q", r.GetConfig().SignaturePolicyPath) - } - defer policyContext.Destroy() - // Look up the image name and its layer, then build the imagePushData from - // the image - img, err := images.FindImage(r.store, source) - if err != nil { - return errors.Wrapf(err, "error locating image %q for importing 
settings", source) - } - cd, err := images.ImportCopyDataFromImage(r.store, r.imageContext, img.ID, "", "") - if err != nil { - return err - } - // Give the image we're producing the same ancestors as its source image - cd.FromImage = cd.Docker.ContainerConfig.Image - cd.FromImageID = string(cd.Docker.Parent) - - // Prep the layers and manifest for export - src, err := cd.MakeImageRef(manifest.GuessMIMEType(cd.Manifest), options.Compression, img.Names, img.TopLayer, nil) - if err != nil { - return errors.Wrapf(err, "error copying layers and metadata") - } - - copyOptions := common.GetCopyOptions(reportWriter, r.GetConfig().SignaturePolicyPath, nil, &options.DockerRegistryOptions, options.SigningOptions) - - // Copy the image to the remote destination - err = cp.Image(policyContext, dest, src, copyOptions) - if err != nil { - return errors.Wrapf(err, "Error copying image to the remote destination") - } - return nil -} - -// TagImage adds a tag to the given image -func (r *Runtime) TagImage(image *storage.Image, tag string) error { - return ctr.ErrNotImplemented -} - -// UntagImage removes a tag from the given image -func (r *Runtime) UntagImage(image *storage.Image, tag string) error { - return ctr.ErrNotImplemented -} - -// RemoveImage deletes an image from local storage -// Images being used by running containers cannot be removed -func (r *Runtime) RemoveImage(image *storage.Image) error { - return ctr.ErrNotImplemented -} - -// GetImage retrieves an image matching the given name or hash from system -// storage -// If no matching image can be found, an error is returned -func (r *Runtime) GetImage(image string) (*storage.Image, error) { - return nil, ctr.ErrNotImplemented -} - -// GetImages retrieves all images present in storage -// Filters can be provided which will determine which images are included in the -// output. Multiple filters are handled by ANDing their output, so only images -// matching all filters are included -func (r *Runtime) GetImages(filter ...ImageFilter) ([]*storage.Image, error) { - return nil, ctr.ErrNotImplemented -} - -// ImportImage imports an OCI format image archive into storage as an image -func (r *Runtime) ImportImage(path string) (*storage.Image, error) { - return nil, ctr.ErrNotImplemented -} diff --git a/libpod/images/copy.go b/libpod/images/copy.go deleted file mode 100644 index 8a9688e0..00000000 --- a/libpod/images/copy.go +++ /dev/null @@ -1,185 +0,0 @@ -package images - -import ( - "io" - "os" - "strings" - "syscall" - - cp "github.com/containers/image/copy" - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/manifest" - "github.com/containers/image/signature" - is "github.com/containers/image/storage" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/kubernetes-incubator/cri-o/libpod/common" - "github.com/pkg/errors" -) - -const ( - // DefaultRegistry is a prefix that we apply to an image name - // to check docker hub first for the image - DefaultRegistry = "docker://" -) - -// CopyOptions contains the options given when pushing or pulling images -type CopyOptions struct { - // Compression specifies the type of compression which is applied to - // layer blobs. The default is to not use compression, but - // archive.Gzip is recommended. 
- Compression archive.Compression - // SignaturePolicyPath specifies an override location for the signature - // policy which should be used for verifying the new image as it is - // being written. Except in specific circumstances, no value should be - // specified, indicating that the shared, system-wide default policy - // should be used. - SignaturePolicyPath string - // ReportWriter is an io.Writer which will be used to log the writing - // of the new image. - ReportWriter io.Writer - // Store is the local storage store which holds the source image. - Store storage.Store - // DockerRegistryOptions encapsulates settings that affect how we - // connect or authenticate to a remote registry to which we want to - // push the image. - common.DockerRegistryOptions - // SigningOptions encapsulates settings that control whether or not we - // strip or add signatures to the image when pushing (uploading) the - // image to a registry. - common.SigningOptions -} - -// PushImage pushes the src image to the destination -func PushImage(srcName, destName string, options CopyOptions) error { - if srcName == "" || destName == "" { - return errors.Wrapf(syscall.EINVAL, "source and destination image names must be specified") - } - - // Get the destination Image Reference - dest, err := alltransports.ParseImageName(destName) - if err != nil { - return errors.Wrapf(err, "error getting destination imageReference for %q", destName) - } - - policyContext, err := common.GetPolicyContext(options.SignaturePolicyPath) - if err != nil { - return errors.Wrapf(err, "Could not get default policy context for signature policy path %q", options.SignaturePolicyPath) - } - defer policyContext.Destroy() - // Look up the image name and its layer, then build the imagePushData from - // the image - img, err := FindImage(options.Store, srcName) - if err != nil { - return errors.Wrapf(err, "error locating image %q for importing settings", srcName) - } - systemContext := common.GetSystemContext(options.SignaturePolicyPath) - cd, err := ImportCopyDataFromImage(options.Store, systemContext, img.ID, "", "") - if err != nil { - return err - } - // Give the image we're producing the same ancestors as its source image - cd.FromImage = cd.Docker.ContainerConfig.Image - cd.FromImageID = string(cd.Docker.Parent) - - // Prep the layers and manifest for export - src, err := cd.MakeImageRef(manifest.GuessMIMEType(cd.Manifest), options.Compression, img.Names, img.TopLayer, nil) - if err != nil { - return errors.Wrapf(err, "error copying layers and metadata") - } - - copyOptions := common.GetCopyOptions(options.ReportWriter, options.SignaturePolicyPath, nil, &options.DockerRegistryOptions, options.SigningOptions) - - // Copy the image to the remote destination - err = cp.Image(policyContext, dest, src, copyOptions) - if err != nil { - return errors.Wrapf(err, "Error copying image to the remote destination") - } - return nil -} - -// PullImage copies the image from the source to the destination -func PullImage(store storage.Store, imgName string, allTags, quiet bool, sc *types.SystemContext) error { - var ( - images []string - output io.Writer - ) - - if quiet { - output = nil - } else { - output = os.Stdout - } - - srcRef, err := alltransports.ParseImageName(imgName) - if err != nil { - defaultName := DefaultRegistry + imgName - srcRef2, err2 := alltransports.ParseImageName(defaultName) - if err2 != nil { - return errors.Errorf("error parsing image name %q: %v", defaultName, err2) - } - srcRef = srcRef2 - } - - splitArr := 
strings.Split(imgName, ":") - - // supports pulling from docker-archive, oci, and registries - if splitArr[0] == "docker-archive" { - tarSource := tarfile.NewSource(splitArr[len(splitArr)-1]) - manifest, err := tarSource.LoadTarManifest() - if err != nil { - return errors.Errorf("error retrieving manifest.json: %v", err) - } - // to pull all the images stored in one tar file - for i := range manifest { - if manifest[i].RepoTags != nil { - images = append(images, manifest[i].RepoTags[0]) - } else { - // create an image object and use the hex value of the digest as the image ID - // for parsing the store reference - newImg, err := srcRef.NewImage(sc) - if err != nil { - return err - } - defer newImg.Close() - digest := newImg.ConfigInfo().Digest - if err := digest.Validate(); err == nil { - images = append(images, "@"+digest.Hex()) - } else { - return errors.Wrapf(err, "error getting config info") - } - } - } - } else if splitArr[0] == "oci" { - // needs to be implemented in future - return errors.Errorf("oci not supported") - } else { - images = append(images, imgName) - } - - policy, err := signature.DefaultPolicy(sc) - if err != nil { - return err - } - - policyContext, err := signature.NewPolicyContext(policy) - if err != nil { - return err - } - defer policyContext.Destroy() - - copyOptions := common.GetCopyOptions(output, "", nil, nil, common.SigningOptions{}) - - for _, image := range images { - destRef, err := is.Transport.ParseStoreReference(store, image) - if err != nil { - return errors.Errorf("error parsing dest reference name: %v", err) - } - if err = cp.Image(policyContext, destRef, srcRef, copyOptions); err != nil { - return errors.Errorf("error loading image %q: %v", image, err) - } - } - return nil -} diff --git a/libpod/images/copy_data.go b/libpod/images/copy_data.go deleted file mode 100644 index 92cd3256..00000000 --- a/libpod/images/copy_data.go +++ /dev/null @@ -1,552 +0,0 @@ -package images - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "time" - - "github.com/containers/image/docker/reference" - is "github.com/containers/image/storage" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/docker/docker/pkg/ioutils" - "github.com/kubernetes-incubator/cri-o/cmd/kpod/docker" - "github.com/kubernetes-incubator/cri-o/libpod/common" - digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" - ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -const ( - // Package is used to identify working containers - Package = "kpod" - containerType = Package + " 0.0.1" - stateFile = Package + ".json" - // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest, - // suitable for specifying as a value of the PreferredManifestType - // member of a CommitOptions structure. It is also the default. - OCIv1ImageManifest = v1.MediaTypeImageManifest -) - -// CopyData stores the basic data used when copying a container or image -type CopyData struct { - store storage.Store - - // Type is used to help identify a build container's metadata. It - // should not be modified. - Type string `json:"type"` - // FromImage is the name of the source image which was used to create - // the container, if one was used. It should not be modified. - FromImage string `json:"image,omitempty"` - // FromImageID is the ID of the source image which was used to create - // the container, if one was used. 
It should not be modified. - FromImageID string `json:"image-id"` - // Config is the source image's configuration. It should not be - // modified. - Config []byte `json:"config,omitempty"` - // Manifest is the source image's manifest. It should not be modified. - Manifest []byte `json:"manifest,omitempty"` - - // Container is the name of the build container. It should not be modified. - Container string `json:"container-name,omitempty"` - // ContainerID is the ID of the build container. It should not be modified. - ContainerID string `json:"container-id,omitempty"` - // MountPoint is the last location where the container's root - // filesystem was mounted. It should not be modified. - MountPoint string `json:"mountpoint,omitempty"` - - // ImageAnnotations is a set of key-value pairs which is stored in the - // image's manifest. - ImageAnnotations map[string]string `json:"annotations,omitempty"` - // ImageCreatedBy is a description of how this container was built. - ImageCreatedBy string `json:"created-by,omitempty"` - - // Image metadata and runtime settings, in multiple formats. - OCIv1 v1.Image `json:"ociv1,omitempty"` - Docker docker.V2Image `json:"docker,omitempty"` -} - -func (c *CopyData) initConfig() { - image := ociv1.Image{} - dimage := docker.V2Image{} - if len(c.Config) > 0 { - // Try to parse the image config. If we fail, try to start over from scratch - if err := json.Unmarshal(c.Config, &dimage); err == nil && dimage.DockerVersion != "" { - image, err = makeOCIv1Image(&dimage) - if err != nil { - image = ociv1.Image{} - } - } else { - if err := json.Unmarshal(c.Config, &image); err != nil { - if dimage, err = makeDockerV2S2Image(&image); err != nil { - dimage = docker.V2Image{} - } - } - } - c.OCIv1 = image - c.Docker = dimage - } else { - // Try to dig out the image configuration from the manifest - manifest := docker.V2S1Manifest{} - if err := json.Unmarshal(c.Manifest, &manifest); err == nil && manifest.SchemaVersion == 1 { - if dimage, err = makeDockerV2S1Image(manifest); err == nil { - if image, err = makeOCIv1Image(&dimage); err != nil { - image = ociv1.Image{} - } - } - } - c.OCIv1 = image - c.Docker = dimage - } - - if len(c.Manifest) > 0 { - // Attempt to recover format-specific data from the manifest - v1Manifest := ociv1.Manifest{} - if json.Unmarshal(c.Manifest, &v1Manifest) == nil { - c.ImageAnnotations = v1Manifest.Annotations - } - } - - c.fixupConfig() -} - -func (c *CopyData) fixupConfig() { - if c.Docker.Config != nil { - // Prefer image-level settings over those from the container it was built from - c.Docker.ContainerConfig = *c.Docker.Config - } - c.Docker.Config = &c.Docker.ContainerConfig - c.Docker.DockerVersion = "" - now := time.Now().UTC() - if c.Docker.Created.IsZero() { - c.Docker.Created = now - } - if c.OCIv1.Created.IsZero() { - c.OCIv1.Created = &now - } - if c.OS() == "" { - c.SetOS(runtime.GOOS) - } - if c.Architecture() == "" { - c.SetArchitecture(runtime.GOARCH) - } - if c.WorkDir() == "" { - c.SetWorkDir(string(filepath.Separator)) - } -} - -// OS returns a name of the OS on which a container built using this image -//is intended to be run. -func (c *CopyData) OS() string { - return c.OCIv1.OS -} - -// SetOS sets the name of the OS on which a container built using this image -// is intended to be run. -func (c *CopyData) SetOS(os string) { - c.OCIv1.OS = os - c.Docker.OS = os -} - -// Architecture returns a name of the architecture on which a container built -// using this image is intended to be run. 
-func (c *CopyData) Architecture() string { - return c.OCIv1.Architecture -} - -// SetArchitecture sets the name of the architecture on which ta container built -// using this image is intended to be run. -func (c *CopyData) SetArchitecture(arch string) { - c.OCIv1.Architecture = arch - c.Docker.Architecture = arch -} - -// WorkDir returns the default working directory for running commands in a container -// built using this image. -func (c *CopyData) WorkDir() string { - return c.OCIv1.Config.WorkingDir -} - -// SetWorkDir sets the location of the default working directory for running commands -// in a container built using this image. -func (c *CopyData) SetWorkDir(there string) { - c.OCIv1.Config.WorkingDir = there - c.Docker.Config.WorkingDir = there -} - -// makeOCIv1Image builds the best OCIv1 image structure we can from the -// contents of the docker image structure. -func makeOCIv1Image(dimage *docker.V2Image) (ociv1.Image, error) { - config := dimage.Config - if config == nil { - config = &dimage.ContainerConfig - } - dimageCreatedTime := dimage.Created.UTC() - image := ociv1.Image{ - Created: &dimageCreatedTime, - Author: dimage.Author, - Architecture: dimage.Architecture, - OS: dimage.OS, - Config: ociv1.ImageConfig{ - User: config.User, - ExposedPorts: map[string]struct{}{}, - Env: config.Env, - Entrypoint: config.Entrypoint, - Cmd: config.Cmd, - Volumes: config.Volumes, - WorkingDir: config.WorkingDir, - Labels: config.Labels, - }, - RootFS: ociv1.RootFS{ - Type: "", - DiffIDs: []digest.Digest{}, - }, - History: []ociv1.History{}, - } - for port, what := range config.ExposedPorts { - image.Config.ExposedPorts[string(port)] = what - } - RootFS := docker.V2S2RootFS{} - if dimage.RootFS != nil { - RootFS = *dimage.RootFS - } - if RootFS.Type == docker.TypeLayers { - image.RootFS.Type = docker.TypeLayers - for _, id := range RootFS.DiffIDs { - image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, digest.Digest(id.String())) - } - } - for _, history := range dimage.History { - historyCreatedTime := history.Created.UTC() - ohistory := ociv1.History{ - Created: &historyCreatedTime, - CreatedBy: history.CreatedBy, - Author: history.Author, - Comment: history.Comment, - EmptyLayer: history.EmptyLayer, - } - image.History = append(image.History, ohistory) - } - return image, nil -} - -// makeDockerV2S2Image builds the best docker image structure we can from the -// contents of the OCI image structure. 
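The conversion helpers here (makeOCIv1Image above, makeDockerV2S2Image next) are almost entirely field-for-field copies between the Docker and OCI config schemas; the main structural work is re-keying maps such as ExposedPorts between docker.Port and plain string keys. A toy illustration of that re-keying, where `Port` is a stand-in for docker.Port:

```
package main

import "fmt"

// Port is a stand-in for docker.Port; the OCI schema keys the same map
// by plain string.
type Port string

// rekeyPorts mirrors the ExposedPorts loops in makeOCIv1Image and
// makeDockerV2S2Image: identical data, different map key types.
func rekeyPorts(in map[Port]struct{}) map[string]struct{} {
	out := make(map[string]struct{}, len(in))
	for p, v := range in {
		out[string(p)] = v
	}
	return out
}

func main() {
	ports := map[Port]struct{}{"8080/tcp": {}, "53/udp": {}}
	fmt.Println(len(rekeyPorts(ports))) // 2
}
```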
-func makeDockerV2S2Image(oimage *ociv1.Image) (docker.V2Image, error) { - image := docker.V2Image{ - V1Image: docker.V1Image{Created: oimage.Created.UTC(), - Author: oimage.Author, - Architecture: oimage.Architecture, - OS: oimage.OS, - ContainerConfig: docker.Config{ - User: oimage.Config.User, - ExposedPorts: docker.PortSet{}, - Env: oimage.Config.Env, - Entrypoint: oimage.Config.Entrypoint, - Cmd: oimage.Config.Cmd, - Volumes: oimage.Config.Volumes, - WorkingDir: oimage.Config.WorkingDir, - Labels: oimage.Config.Labels, - }, - }, - RootFS: &docker.V2S2RootFS{ - Type: "", - DiffIDs: []digest.Digest{}, - }, - History: []docker.V2S2History{}, - } - for port, what := range oimage.Config.ExposedPorts { - image.ContainerConfig.ExposedPorts[docker.Port(port)] = what - } - if oimage.RootFS.Type == docker.TypeLayers { - image.RootFS.Type = docker.TypeLayers - for _, id := range oimage.RootFS.DiffIDs { - d, err := digest.Parse(id.String()) - if err != nil { - return docker.V2Image{}, err - } - image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, d) - } - } - for _, history := range oimage.History { - dhistory := docker.V2S2History{ - Created: history.Created.UTC(), - CreatedBy: history.CreatedBy, - Author: history.Author, - Comment: history.Comment, - EmptyLayer: history.EmptyLayer, - } - image.History = append(image.History, dhistory) - } - image.Config = &image.ContainerConfig - return image, nil -} - -// makeDockerV2S1Image builds the best docker image structure we can from the -// contents of the V2S1 image structure. -func makeDockerV2S1Image(manifest docker.V2S1Manifest) (docker.V2Image, error) { - // Treat the most recent (first) item in the history as a description of the image. - if len(manifest.History) == 0 { - return docker.V2Image{}, errors.Errorf("error parsing image configuration from manifest") - } - dimage := docker.V2Image{} - err := json.Unmarshal([]byte(manifest.History[0].V1Compatibility), &dimage) - if err != nil { - return docker.V2Image{}, err - } - if dimage.DockerVersion == "" { - return docker.V2Image{}, errors.Errorf("error parsing image configuration from history") - } - // The DiffID list is intended to contain the sums of _uncompressed_ blobs, and these are most - // likely compressed, so leave the list empty to avoid potential confusion later on. We can - // construct a list with the correct values when we prep layers for pushing, so we don't lose. - // information by leaving this part undone. - rootFS := &docker.V2S2RootFS{ - Type: docker.TypeLayers, - DiffIDs: []digest.Digest{}, - } - // Build a filesystem history. - history := []docker.V2S2History{} - for i := range manifest.History { - h := docker.V2S2History{ - Created: time.Now().UTC(), - Author: "", - CreatedBy: "", - Comment: "", - EmptyLayer: false, - } - dcompat := docker.V1Compatibility{} - if err2 := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), &dcompat); err2 == nil { - h.Created = dcompat.Created.UTC() - h.Author = dcompat.Author - h.Comment = dcompat.Comment - if len(dcompat.ContainerConfig.Cmd) > 0 { - h.CreatedBy = fmt.Sprintf("%v", dcompat.ContainerConfig.Cmd) - } - h.EmptyLayer = dcompat.ThrowAway - } - // Prepend this layer to the list, because a v2s1 format manifest's list is in reverse order - // compared to v2s2, which lists earlier layers before later ones. - history = append([]docker.V2S2History{h}, history...) 
- } - dimage.RootFS = rootFS - dimage.History = history - return dimage, nil -} - -// Annotations gets the annotations of the container or image -func (c *CopyData) Annotations() map[string]string { - return common.CopyStringStringMap(c.ImageAnnotations) -} - -// Save the CopyData to disk -func (c *CopyData) Save() error { - buildstate, err := json.Marshal(c) - if err != nil { - return err - } - cdir, err := c.store.ContainerDirectory(c.ContainerID) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600) - -} - -// GetContainerCopyData gets the copy data for a container -func GetContainerCopyData(store storage.Store, name string) (*CopyData, error) { - var data *CopyData - var err error - if name != "" { - data, err = openCopyData(store, name) - if os.IsNotExist(errors.Cause(err)) { - data, err = importCopyData(store, name, "") - } - } - if err != nil { - return nil, errors.Wrapf(err, "error reading build container") - } - if data == nil { - return nil, errors.Errorf("error finding build container") - } - return data, nil - -} - -// GetImageCopyData gets the copy data for an image -func GetImageCopyData(store storage.Store, image string) (*CopyData, error) { - if image == "" { - return nil, errors.Errorf("image name must be specified") - } - img, err := FindImage(store, image) - if err != nil { - return nil, errors.Wrapf(err, "error locating image %q for importing settings", image) - } - - systemContext := common.GetSystemContext("") - data, err := ImportCopyDataFromImage(store, systemContext, img.ID, "", "") - if err != nil { - return nil, errors.Wrapf(err, "error reading image") - } - if data == nil { - return nil, errors.Errorf("error mocking up build configuration") - } - return data, nil - -} - -func importCopyData(store storage.Store, container, signaturePolicyPath string) (*CopyData, error) { - if container == "" { - return nil, errors.Errorf("container name must be specified") - } - - c, err := store.Container(container) - if err != nil { - return nil, err - } - - systemContext := common.GetSystemContext(signaturePolicyPath) - - data, err := ImportCopyDataFromImage(store, systemContext, c.ImageID, container, c.ID) - if err != nil { - return nil, err - } - - if data.FromImageID != "" { - if d, err2 := digest.Parse(data.FromImageID); err2 == nil { - data.Docker.Parent = docker.ID(d) - } else { - data.Docker.Parent = docker.ID(digest.NewDigestFromHex(digest.Canonical.String(), data.FromImageID)) - } - } - if data.FromImage != "" { - data.Docker.ContainerConfig.Image = data.FromImage - } - - err = data.Save() - if err != nil { - return nil, errors.Wrapf(err, "error saving CopyData state") - } - - return data, nil -} - -func openCopyData(store storage.Store, container string) (*CopyData, error) { - cdir, err := store.ContainerDirectory(container) - if err != nil { - return nil, err - } - buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) - if err != nil { - return nil, err - } - c := &CopyData{} - err = json.Unmarshal(buildstate, &c) - if err != nil { - return nil, err - } - if c.Type != containerType { - return nil, errors.Errorf("container is not a %s container", Package) - } - c.store = store - c.fixupConfig() - return c, nil - -} - -// ImportCopyDataFromImage creates copy data for an image with the given parameters -func ImportCopyDataFromImage(store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*CopyData, error) { - manifest := []byte{} - config := []byte{}
- imageName := "" - - if imageID != "" { - ref, err := is.Transport.ParseStoreReference(store, "@"+imageID) - if err != nil { - return nil, errors.Wrapf(err, "no such image %q", "@"+imageID) - } - src, err2 := ref.NewImage(systemContext) - if err2 != nil { - return nil, errors.Wrapf(err2, "error instantiating image") - } - defer src.Close() - config, err = src.ConfigBlob() - if err != nil { - return nil, errors.Wrapf(err, "error reading image configuration") - } - manifest, _, err = src.Manifest() - if err != nil { - return nil, errors.Wrapf(err, "error reading image manifest") - } - if img, err3 := store.Image(imageID); err3 == nil { - if len(img.Names) > 0 { - imageName = img.Names[0] - } - } - } - - data := &CopyData{ - store: store, - Type: containerType, - FromImage: imageName, - FromImageID: imageID, - Config: config, - Manifest: manifest, - Container: containerName, - ContainerID: containerID, - ImageAnnotations: map[string]string{}, - ImageCreatedBy: "", - } - - data.initConfig() - - return data, nil - -} - -// MakeImageRef converts a CopyData struct into a types.ImageReference -func (c *CopyData) MakeImageRef(manifestType string, compress archive.Compression, names []string, layerID string, historyTimestamp *time.Time) (types.ImageReference, error) { - var name reference.Named - if len(names) > 0 { - if parsed, err := reference.ParseNamed(names[0]); err == nil { - name = parsed - } - } - if manifestType == "" { - manifestType = OCIv1ImageManifest - } - oconfig, err := json.Marshal(&c.OCIv1) - if err != nil { - return nil, errors.Wrapf(err, "error encoding OCI-format image configuration") - } - dconfig, err := json.Marshal(&c.Docker) - if err != nil { - return nil, errors.Wrapf(err, "error encoding docker-format image configuration") - } - created := time.Now().UTC() - if historyTimestamp != nil { - created = historyTimestamp.UTC() - } - ref := &CopyRef{ - store: c.store, - compression: compress, - name: name, - names: names, - layerID: layerID, - addHistory: false, - oconfig: oconfig, - dconfig: dconfig, - created: created, - createdBy: c.ImageCreatedBy, - annotations: c.ImageAnnotations, - preferredManifestType: manifestType, - exporting: true, - } - return ref, nil -} diff --git a/libpod/images/copy_ref.go b/libpod/images/copy_ref.go deleted file mode 100644 index fc4371aa..00000000 --- a/libpod/images/copy_ref.go +++ /dev/null @@ -1,460 +0,0 @@ -package images - -import ( - "bytes" - "context" - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - is "github.com/containers/image/storage" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/docker/docker/pkg/ioutils" - "github.com/kubernetes-incubator/cri-o/cmd/kpod/docker" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// CopyRef handles image references used for copying images to/from remotes -type CopyRef struct { - store storage.Store - compression archive.Compression - name reference.Named - names []string - layerID string - addHistory bool - oconfig []byte - dconfig []byte - created time.Time - createdBy string - annotations map[string]string - preferredManifestType string - exporting bool -} - -type copySource struct { - path string - ref *CopyRef - store storage.Store 
- layerID string - names []string - addHistory bool - compression archive.Compression - config []byte - configDigest digest.Digest - manifest []byte - manifestType string - exporting bool -} - -// NewImage creates a new image from the given system context -func (c *CopyRef) NewImage(sc *types.SystemContext) (types.Image, error) { - src, err := c.NewImageSource(sc, nil) - if err != nil { - return nil, err - } - return image.FromSource(src) -} - -func selectManifestType(preferred string, acceptable, supported []string) string { - selected := preferred - for _, accept := range acceptable { - if preferred == accept { - return preferred - } - for _, support := range supported { - if accept == support { - selected = accept - } - } - } - return selected -} - -// NewImageSource creates a new image source from the given system context and manifest -func (c *CopyRef) NewImageSource(sc *types.SystemContext, manifestTypes []string) (src types.ImageSource, err error) { - // Decide which type of manifest and configuration output we're going to provide. - supportedManifestTypes := []string{v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest} - manifestType := selectManifestType(c.preferredManifestType, manifestTypes, supportedManifestTypes) - // If it's not a format we support, return an error. - if manifestType != v1.MediaTypeImageManifest && manifestType != docker.V2S2MediaTypeManifest { - return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)", - manifestType, v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest) - } - // Start building the list of layers using the read-write layer. - layers := []string{} - layerID := c.layerID - layer, err := c.store.Layer(layerID) - if err != nil { - return nil, errors.Wrapf(err, "unable to read layer %q", layerID) - } - // Walk the list of parent layers, prepending each as we go. - for layer != nil { - layers = append(append([]string{}, layerID), layers...) - layerID = layer.Parent - if layerID == "" { - err = nil - break - } - layer, err = c.store.Layer(layerID) - if err != nil { - return nil, errors.Wrapf(err, "unable to read layer %q", layerID) - } - } - logrus.Debugf("layer list: %q", layers) - - // Make a temporary directory to hold blobs. - path, err := ioutil.TempDir(os.TempDir(), "kpod") - if err != nil { - return nil, err - } - logrus.Debugf("using %q to hold temporary data", path) - defer func() { - if src == nil { - err2 := os.RemoveAll(path) - if err2 != nil { - logrus.Errorf("error removing %q: %v", path, err2) - } - } - }() - - // Build fresh copies of the configurations so that we don't mess with the values in the Builder - // object itself. - oimage := v1.Image{} - err = json.Unmarshal(c.oconfig, &oimage) - if err != nil { - return nil, err - } - dimage := docker.V2Image{} - err = json.Unmarshal(c.dconfig, &dimage) - if err != nil { - return nil, err - } - - // Start building manifests.
- omanifest := v1.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: v1.Descriptor{ - MediaType: v1.MediaTypeImageConfig, - }, - Layers: []v1.Descriptor{}, - Annotations: c.annotations, - } - dmanifest := docker.V2S2Manifest{ - V2Versioned: docker.V2Versioned{ - SchemaVersion: 2, - MediaType: docker.V2S2MediaTypeManifest, - }, - Config: docker.V2S2Descriptor{ - MediaType: docker.V2S2MediaTypeImageConfig, - }, - Layers: []docker.V2S2Descriptor{}, - } - - oimage.RootFS.Type = docker.TypeLayers - oimage.RootFS.DiffIDs = []digest.Digest{} - dimage.RootFS = &docker.V2S2RootFS{} - dimage.RootFS.Type = docker.TypeLayers - dimage.RootFS.DiffIDs = []digest.Digest{} - - // Extract each layer and compute its digests, both compressed (if requested) and uncompressed. - for _, layerID := range layers { - omediaType := v1.MediaTypeImageLayer - dmediaType := docker.V2S2MediaTypeUncompressedLayer - // Figure out which media type we want to call this. Assume no compression. - if c.compression != archive.Uncompressed { - switch c.compression { - case archive.Gzip: - omediaType = v1.MediaTypeImageLayerGzip - dmediaType = docker.V2S2MediaTypeLayer - logrus.Debugf("compressing layer %q with gzip", layerID) - case archive.Bzip2: - // Until the image specs define a media type for bzip2-compressed layers, even if we know - // how to decompress them, we can't try to compress layers with bzip2. - return nil, errors.New("media type for bzip2-compressed layers is not defined") - default: - logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID) - } - } - // If we're not re-exporting the data, just fake up layer and diff IDs for the manifest. - if !c.exporting { - fakeLayerDigest := digest.NewDigestFromHex(digest.Canonical.String(), layerID) - // Add a note in the manifest about the layer. The blobs should be identified by their - // possibly-compressed blob digests, but we just use the layer IDs here. - olayerDescriptor := v1.Descriptor{ - MediaType: omediaType, - Digest: fakeLayerDigest, - Size: -1, - } - omanifest.Layers = append(omanifest.Layers, olayerDescriptor) - dlayerDescriptor := docker.V2S2Descriptor{ - MediaType: dmediaType, - Digest: fakeLayerDigest, - Size: -1, - } - dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) - // Add a note about the diffID, which should be the uncompressed digest of the blob, but - // we just use the layer ID here. - oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, fakeLayerDigest) - dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, fakeLayerDigest) - continue - } - // Start reading the layer. - rc, err := c.store.Diff("", layerID, nil) - if err != nil { - return nil, errors.Wrapf(err, "error extracting layer %q", layerID) - } - defer rc.Close() - // Set up to decompress the layer, in case it's coming out compressed. Due to implementation - // differences, the result may not match the digest the blob had when it was originally imported, - // so we have to recompute all of this anyway if we want to be sure the digests we use will be - // correct. - uncompressed, err := archive.DecompressStream(rc) - if err != nil { - return nil, errors.Wrapf(err, "error decompressing layer %q", layerID) - } - defer uncompressed.Close() - srcHasher := digest.Canonical.Digester() - reader := io.TeeReader(uncompressed, srcHasher.Hash()) - // Set up to write the possibly-recompressed blob.
- layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600) - if err != nil { - return nil, errors.Wrapf(err, "error opening file for layer %q", layerID) - } - destHasher := digest.Canonical.Digester() - counter := ioutils.NewWriteCounter(layerFile) - multiWriter := io.MultiWriter(counter, destHasher.Hash()) - // Compress the layer, if we're compressing it. - writer, err := archive.CompressStream(multiWriter, c.compression) - if err != nil { - return nil, errors.Wrapf(err, "error compressing layer %q", layerID) - } - size, err := io.Copy(writer, reader) - if err != nil { - return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) - } - writer.Close() - layerFile.Close() - if c.compression == archive.Uncompressed { - if size != counter.Count { - return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count) - } - } else { - size = counter.Count - } - logrus.Debugf("layer %q size is %d bytes", layerID, size) - // Rename the layer so that we can more easily find it by digest later. - err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())) - if err != nil { - return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) - } - // Add a note in the manifest about the layer. The blobs are identified by their possibly- - // compressed blob digests. - olayerDescriptor := v1.Descriptor{ - MediaType: omediaType, - Digest: destHasher.Digest(), - Size: size, - } - omanifest.Layers = append(omanifest.Layers, olayerDescriptor) - dlayerDescriptor := docker.V2S2Descriptor{ - MediaType: dmediaType, - Digest: destHasher.Digest(), - Size: size, - } - dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) - // Add a note about the diffID, which is always an uncompressed value. - oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest()) - dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest()) - } - - if c.addHistory { - // Build history notes in the image configurations. - onews := v1.History{ - Created: &c.created, - CreatedBy: c.createdBy, - Author: oimage.Author, - EmptyLayer: false, - } - oimage.History = append(oimage.History, onews) - dnews := docker.V2S2History{ - Created: c.created, - CreatedBy: c.createdBy, - Author: dimage.Author, - EmptyLayer: false, - } - dimage.History = append(dimage.History, dnews) - } - - // Encode the image configuration blob. - oconfig, err := json.Marshal(&oimage) - if err != nil { - return nil, err - } - logrus.Debugf("OCIv1 config = %s", oconfig) - - // Add the configuration blob to the manifest. - omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig) - omanifest.Config.Size = int64(len(oconfig)) - omanifest.Config.MediaType = v1.MediaTypeImageConfig - - // Encode the manifest. - omanifestbytes, err := json.Marshal(&omanifest) - if err != nil { - return nil, err - } - logrus.Debugf("OCIv1 manifest = %s", omanifestbytes) - - // Encode the image configuration blob. - dconfig, err := json.Marshal(&dimage) - if err != nil { - return nil, err - } - logrus.Debugf("Docker v2s2 config = %s", dconfig) - - // Add the configuration blob to the manifest. - dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig) - dmanifest.Config.Size = int64(len(dconfig)) - dmanifest.Config.MediaType = docker.V2S2MediaTypeImageConfig - - // Encode the manifest. 
- dmanifestbytes, err := json.Marshal(&dmanifest) - if err != nil { - return nil, err - } - logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes) - - // Decide which manifest and configuration blobs we'll actually output. - var config []byte - var manifest []byte - switch manifestType { - case v1.MediaTypeImageManifest: - manifest = omanifestbytes - config = oconfig - case docker.V2S2MediaTypeManifest: - manifest = dmanifestbytes - config = dconfig - default: - panic("unreachable code: unsupported manifest type") - } - src = &copySource{ - path: path, - ref: c, - store: c.store, - layerID: c.layerID, - names: c.names, - addHistory: c.addHistory, - compression: c.compression, - config: config, - configDigest: digest.Canonical.FromBytes(config), - manifest: manifest, - manifestType: manifestType, - exporting: c.exporting, - } - return src, nil -} - -// NewImageDestination creates a new image destination from the given system context -func (c *CopyRef) NewImageDestination(sc *types.SystemContext) (types.ImageDestination, error) { - return nil, errors.Errorf("can't write to a container") -} - -// DockerReference gets the docker reference for the given CopyRef -func (c *CopyRef) DockerReference() reference.Named { - return c.name -} - -// StringWithinTransport returns the first name of the copyRef -func (c *CopyRef) StringWithinTransport() string { - if len(c.names) > 0 { - return c.names[0] - } - return "" -} - -// DeleteImage deletes an image in the CopyRef -func (c *CopyRef) DeleteImage(*types.SystemContext) error { - // we were never here - return nil -} - -// PolicyConfigurationIdentity returns the policy configuration for the CopyRef -func (c *CopyRef) PolicyConfigurationIdentity() string { - return "" -} - -// PolicyConfigurationNamespaces returns the policy configuration namespace for the CopyRef -func (c *CopyRef) PolicyConfigurationNamespaces() []string { - return nil -} - -// Transport returns an ImageTransport for the given CopyRef -func (c *CopyRef) Transport() types.ImageTransport { - return is.Transport -} - -func (cs *copySource) Close() error { - err := os.RemoveAll(cs.path) - if err != nil { - logrus.Errorf("error removing %q: %v", cs.path, err) - } - return err -} - -func (cs *copySource) Reference() types.ImageReference { - return cs.ref -} - -func (cs *copySource) GetSignatures(context.Context) ([][]byte, error) { - return nil, nil -} - -func (cs *copySource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return []byte{}, "", errors.Errorf("TODO") -} - -func (cs *copySource) GetManifest() ([]byte, string, error) { - return cs.manifest, cs.manifestType, nil -} - -func (cs *copySource) GetBlob(blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) { - if blob.Digest == cs.configDigest { - logrus.Debugf("start reading config") - reader := bytes.NewReader(cs.config) - closer := func() error { - logrus.Debugf("finished reading config") - return nil - } - return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil - } - layerFile, err := os.OpenFile(filepath.Join(cs.path, blob.Digest.String()), os.O_RDONLY, 0600) - if err != nil { - logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err) - return nil, -1, err - } - size = -1 - st, err := layerFile.Stat() - if err != nil { - logrus.Warnf("error reading size of layer %q: %v", blob.Digest.String(), err) - } else { - size = st.Size() - } - logrus.Debugf("reading layer %q", blob.Digest.String()) - closer := func() error { - layerFile.Close() - logrus.Debugf("finished reading
layer %q", blob.Digest.String()) - return nil - } - return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil -} diff --git a/libpod/images/image.go b/libpod/images/image.go deleted file mode 100644 index 191b4392..00000000 --- a/libpod/images/image.go +++ /dev/null @@ -1,288 +0,0 @@ -package images - -import ( - "fmt" - "strings" - "time" - - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/libpod/common" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// FilterParams contains the filter options that may be given when outputting images -type FilterParams struct { - dangling string - label string - beforeImage time.Time - sinceImage time.Time - referencePattern string -} - -// ParseFilter takes a set of images and a filter string as input, and returns the -func ParseFilter(store storage.Store, filter string) (*FilterParams, error) { - images, err := store.Images() - if err != nil { - return nil, err - } - params := new(FilterParams) - filterStrings := strings.Split(filter, ",") - for _, param := range filterStrings { - pair := strings.SplitN(param, "=", 2) - switch strings.TrimSpace(pair[0]) { - case "dangling": - if common.IsValidBool(pair[1]) { - params.dangling = pair[1] - } else { - return nil, fmt.Errorf("invalid filter: '%s=[%s]'", pair[0], pair[1]) - } - case "label": - params.label = pair[1] - case "before": - if img, err := findImageInSlice(images, pair[1]); err == nil { - info, err := getImageInspectInfo(store, img) - if err != nil { - return nil, err - } - params.beforeImage = info.Created - } else { - return nil, fmt.Errorf("no such id: %s", pair[0]) - } - case "since": - if img, err := findImageInSlice(images, pair[1]); err == nil { - info, err := getImageInspectInfo(store, img) - if err != nil { - return nil, err - } - params.sinceImage = info.Created - } else { - return nil, fmt.Errorf("no such id: %s``", pair[0]) - } - case "reference": - params.referencePattern = pair[1] - default: - return nil, fmt.Errorf("invalid filter: '%s'", pair[0]) - } - } - return params, nil -} - -func matchesFilter(store storage.Store, image storage.Image, name string, params *FilterParams) bool { - if params == nil { - return true - } - - info, err := getImageInspectInfo(store, image) - if err != nil { - return false - } - if params.dangling != "" && !matchesDangling(name, params.dangling) { - return false - } else if params.label != "" && !matchesLabel(info, store, params.label) { - return false - } else if !params.beforeImage.IsZero() && !matchesBeforeImage(info, name, params) { - return false - } else if !params.sinceImage.IsZero() && !matchesSinceImage(info, name, params) { - return false - } else if params.referencePattern != "" && !MatchesReference(name, params.referencePattern) { - return false - } - return true -} - -func matchesDangling(name string, dangling string) bool { - if common.IsFalse(dangling) && name != "" { - return true - } else if common.IsTrue(dangling) && name == "" { - return true - } - return false -} - -func matchesLabel(info *types.ImageInspectInfo, store storage.Store, label string) bool { - pair := strings.SplitN(label, "=", 2) - for key, value := range info.Labels { - if key == pair[0] { - if len(pair) == 2 { - if value == pair[1] { - return true - } - } else { - return false - } - } - } - return false -} - -// Returns true if the image was created since the filter image. 
Returns -// false otherwise -func matchesBeforeImage(info *types.ImageInspectInfo, name string, params *FilterParams) bool { - return info.Created.Before(params.beforeImage) -} - -// Returns true if the image was created since the filter image. Returns -// false otherwise -func matchesSinceImage(info *types.ImageInspectInfo, name string, params *FilterParams) bool { - return info.Created.After(params.sinceImage) -} - -// MatchesID returns true if argID is a full or partial match for id -func MatchesID(id, argID string) bool { - return strings.HasPrefix(id, argID) -} - -// MatchesReference returns true if argName is a full or partial match for name -// Partial matches will register only if they match the most specific part of the name available -// For example, take the image docker.io/library/redis:latest -// redis, library/redis, docker.io/library/redis, redis:latest, etc. will match -// But redis:alpine, ry/redis, library, and io/library/redis will not -func MatchesReference(name, argName string) bool { - if argName == "" { - return false - } - splitName := strings.Split(name, ":") - // If the arg contains a tag, we handle it differently than if it does not - if strings.Contains(argName, ":") { - splitArg := strings.Split(argName, ":") - return strings.HasSuffix(splitName[0], splitArg[0]) && (splitName[1] == splitArg[1]) - } - return strings.HasSuffix(splitName[0], argName) -} - -// FormattedSize returns a human-readable formatted size for the image -func FormattedSize(size float64) string { - suffixes := [5]string{"B", "KB", "MB", "GB", "TB"} - - count := 0 - for size >= 1024 && count < 4 { - size /= 1024 - count++ - } - return fmt.Sprintf("%.4g %s", size, suffixes[count]) -} - -// FindImage searches for a *storage.Image matching the given name or ID in the given store. -func FindImage(store storage.Store, image string) (*storage.Image, error) { - var img *storage.Image - ref, err := is.Transport.ParseStoreReference(store, image) - if err == nil { - img, err = is.Transport.GetStoreImage(store, ref) - } - if err != nil { - img2, err2 := store.Image(image) - if err2 != nil { - if ref == nil { - return nil, errors.Wrapf(err, "error parsing reference to image %q", image) - } - return nil, errors.Wrapf(err, "unable to locate image %q", image) - } - img = img2 - } - return img, nil -} - -// FindImageRef searches for and returns a new types.Image matching the given name or ID in the given store. -func FindImageRef(store storage.Store, image string) (types.Image, error) { - img, err := FindImage(store, image) - if err != nil { - return nil, errors.Wrapf(err, "unable to locate image %q", image) - } - ref, err := is.Transport.ParseStoreReference(store, "@"+img.ID) - if err != nil { - return nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID) - } - imgRef, err := ref.NewImage(nil) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", img.ID) - } - return imgRef, nil -} - -func findImageInSlice(images []storage.Image, ref string) (storage.Image, error) { - for _, image := range images { - if MatchesID(image.ID, ref) { - return image, nil - } - for _, name := range image.Names { - if MatchesReference(name, ref) { - return image, nil - } - } - } - return storage.Image{}, errors.New("could not find image") -} - -// InfoAndDigestAndSize returns the inspection info and size of the image in the given -// store and the digest of its manifest, if it has one, or "" if it doesn't.
-func InfoAndDigestAndSize(store storage.Store, img storage.Image) (*types.ImageInspectInfo, digest.Digest, int64, error) { - imgRef, err := FindImageRef(store, "@"+img.ID) - if err != nil { - return nil, "", -1, errors.Wrapf(err, "error reading image %q", img.ID) - } - defer imgRef.Close() - return infoAndDigestAndSize(imgRef) -} - -func infoAndDigestAndSize(imgRef types.Image) (*types.ImageInspectInfo, digest.Digest, int64, error) { - imgSize, err := imgRef.Size() - if err != nil { - return nil, "", -1, errors.Wrapf(err, "error reading size of image %q", transports.ImageName(imgRef.Reference())) - } - manifest, _, err := imgRef.Manifest() - if err != nil { - return nil, "", -1, errors.Wrapf(err, "error reading manifest for image %q", transports.ImageName(imgRef.Reference())) - } - manifestDigest := digest.Digest("") - if len(manifest) > 0 { - manifestDigest = digest.Canonical.FromBytes(manifest) - } - info, err := imgRef.Inspect() - if err != nil { - return nil, "", -1, errors.Wrapf(err, "error inspecting image %q", transports.ImageName(imgRef.Reference())) - } - return info, manifestDigest, imgSize, nil -} - -// GetImagesMatchingFilter returns a slice of all images in the store that match the provided FilterParams. -// Images with more than one name matching the filter will be in the slice once for each name -func GetImagesMatchingFilter(store storage.Store, filter *FilterParams, argName string) ([]storage.Image, error) { - images, err := store.Images() - filteredImages := []storage.Image{} - if err != nil { - return nil, err - } - for _, image := range images { - names := []string{} - if len(image.Names) > 0 { - names = image.Names - } else { - names = append(names, "") - } - for _, name := range names { - if (filter == nil && argName == "") || (filter != nil && matchesFilter(store, image, name, filter)) || MatchesReference(name, argName) { - newImage := image - newImage.Names = []string{name} - filteredImages = append(filteredImages, newImage) - } - } - } - return filteredImages, nil -} - -func getImageInspectInfo(store storage.Store, image storage.Image) (*types.ImageInspectInfo, error) { - storeRef, err := is.Transport.ParseStoreReference(store, "@"+image.ID) - if err != nil { - return nil, err - } - img, err := storeRef.NewImage(nil) - if err != nil { - return nil, err - } - defer img.Close() - return img.Inspect() -} diff --git a/libpod/images/image_data.go b/libpod/images/image_data.go deleted file mode 100644 index f187bc4d..00000000 --- a/libpod/images/image_data.go +++ /dev/null @@ -1,164 +0,0 @@ -package images - -import ( - "encoding/json" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/libpod/driver" - digest "github.com/opencontainers/go-digest" - ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// Data handles the data used when inspecting an image -// nolint -type Data struct { - ID string - Tags []string - Digests []string - Digest digest.Digest - Comment string - Created *time.Time - Container string - Author string - Config ociv1.ImageConfig - Architecture string - OS string - Annotations map[string]string - CreatedBy string - Size uint - VirtualSize uint - GraphDriver driver.Data - RootFS ociv1.RootFS -} - -// ParseImageNames parses the names we've stored with an image into a list of -// tagged references and a list of references which contain digests.
-func ParseImageNames(names []string) (tags, digests []string, err error) { - for _, name := range names { - if named, err := reference.ParseNamed(name); err == nil { - if digested, ok := named.(reference.Digested); ok { - canonical, err := reference.WithDigest(named, digested.Digest()) - if err == nil { - digests = append(digests, canonical.String()) - } - } else { - if reference.IsNameOnly(named) { - named = reference.TagNameOnly(named) - } - if tagged, ok := named.(reference.Tagged); ok { - namedTagged, err := reference.WithTag(named, tagged.Tag()) - if err == nil { - tags = append(tags, namedTagged.String()) - } - } - } - } - } - return tags, digests, nil -} - -func annotations(manifest []byte, manifestType string) map[string]string { - annotations := make(map[string]string) - switch manifestType { - case ociv1.MediaTypeImageManifest: - var m ociv1.Manifest - if err := json.Unmarshal(manifest, &m); err == nil { - for k, v := range m.Annotations { - annotations[k] = v - } - } - } - return annotations -} - -// GetData gets the Data for an image with the given name in the given store. -func GetData(store storage.Store, name string) (*Data, error) { - img, err := FindImage(store, name) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", name) - } - - imgRef, err := FindImageRef(store, "@"+img.ID) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", img.ID) - } - defer imgRef.Close() - - tags, digests, err := ParseImageNames(img.Names) - if err != nil { - return nil, errors.Wrapf(err, "error parsing image names for %q", name) - } - - driverName, err := driver.GetDriverName(store) - if err != nil { - return nil, errors.Wrapf(err, "error reading name of storage driver") - } - - topLayerID := img.TopLayer - - driverMetadata, err := driver.GetDriverMetadata(store, topLayerID) - if err != nil { - return nil, errors.Wrapf(err, "error asking storage driver %q for metadata", driverName) - } - - layer, err := store.Layer(topLayerID) - if err != nil { - return nil, errors.Wrapf(err, "error reading information about layer %q", topLayerID) - } - size, err := store.DiffSize(layer.Parent, layer.ID) - if err != nil { - return nil, errors.Wrapf(err, "error determining size of layer %q", layer.ID) - } - - imgSize, err := imgRef.Size() - if err != nil { - return nil, errors.Wrapf(err, "error determining size of image %q", transports.ImageName(imgRef.Reference())) - } - - manifest, manifestType, err := imgRef.Manifest() - if err != nil { - return nil, errors.Wrapf(err, "error reading manifest for image %q", img.ID) - } - manifestDigest := digest.Digest("") - if len(manifest) > 0 { - manifestDigest = digest.Canonical.FromBytes(manifest) - } - annotations := annotations(manifest, manifestType) - - config, err := imgRef.OCIConfig() - if err != nil { - return nil, errors.Wrapf(err, "error reading image configuration for %q", img.ID) - } - historyComment := "" - historyCreatedBy := "" - if len(config.History) > 0 { - historyComment = config.History[len(config.History)-1].Comment - historyCreatedBy = config.History[len(config.History)-1].CreatedBy - } - - return &Data{ - ID: img.ID, - Tags: tags, - Digests: digests, - Digest: manifestDigest, - Comment: historyComment, - Created: config.Created, - Author: config.Author, - Config: config.Config, - Architecture: config.Architecture, - OS: config.OS, - Annotations: annotations, - CreatedBy: historyCreatedBy, - Size: uint(size), - VirtualSize: uint(size + imgSize), - GraphDriver: driver.Data{ - Name: driverName, - Data:
driverMetadata, - }, - RootFS: config.RootFS, - }, nil -} diff --git a/libpod/images/rmi.go b/libpod/images/rmi.go deleted file mode 100644 index b3b0459e..00000000 --- a/libpod/images/rmi.go +++ /dev/null @@ -1,35 +0,0 @@ -package images - -import ( - "github.com/containers/storage" - "github.com/pkg/errors" -) - -// UntagImage removes the tag from the given image -func UntagImage(store storage.Store, image *storage.Image, imgArg string) (string, error) { - // Remove name from image.Names and set the new names - newNames := []string{} - removedName := "" - for _, name := range image.Names { - if MatchesReference(name, imgArg) { - removedName = name - continue - } - newNames = append(newNames, name) - } - if removedName != "" { - if err := store.SetNames(image.ID, newNames); err != nil { - return "", errors.Wrapf(err, "error removing name %q from image %q", removedName, image.ID) - } - } - return removedName, nil -} - -// RemoveImage removes the given image from storage -func RemoveImage(image *storage.Image, store storage.Store) (string, error) { - _, err := store.DeleteImage(image.ID, true) - if err != nil { - return "", errors.Wrapf(err, "could not remove image %q", image.ID) - } - return image.ID, nil -} diff --git a/libpod/layers/layer.go b/libpod/layers/layer.go deleted file mode 100644 index 865cbe70..00000000 --- a/libpod/layers/layer.go +++ /dev/null @@ -1,12 +0,0 @@ -package layers - -import cstorage "github.com/containers/storage" - -// FullID gets the full id of a layer given a partial id or name -func FullID(store cstorage.Store, id string) (string, error) { - layer, err := store.Layer(id) - if err != nil { - return "", err - } - return layer.ID, nil -} diff --git a/libpod/options.go b/libpod/options.go deleted file mode 100644 index ed259e55..00000000 --- a/libpod/options.go +++ /dev/null @@ -1,228 +0,0 @@ -package libpod - -import ( - "fmt" - - "github.com/containers/storage" - "github.com/containers/storage/pkg/idtools" - "github.com/kubernetes-incubator/cri-o/libpod/ctr" - "github.com/kubernetes-incubator/cri-o/libpod/pod" -) - -var ( - errRuntimeFinalized = fmt.Errorf("runtime has already been finalized") - ctrNotImplemented = func(c *ctr.Container) error { - return fmt.Errorf("NOT IMPLEMENTED") - } -) - -const ( - // IPCNamespace represents the IPC namespace - IPCNamespace = "ipc" - // MountNamespace represents the mount namespace - MountNamespace = "mount" - // NetNamespace represents the network namespace - NetNamespace = "net" - // PIDNamespace represents the PID namespace - PIDNamespace = "pid" - // UserNamespace represents the user namespace - UserNamespace = "user" - // UTSNamespace represents the UTS namespace - UTSNamespace = "uts" -) - -// Runtime Creation Options - -// WithStorageConfig uses the given configuration to set up container storage -// If this is not specified, the system default configuration will be used -// instead -func WithStorageConfig(config storage.StoreOptions) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return errRuntimeFinalized - } - - rt.config.StorageConfig.RunRoot = config.RunRoot - rt.config.StorageConfig.GraphRoot = config.GraphRoot - rt.config.StorageConfig.GraphDriverName = config.GraphDriverName - - rt.config.StorageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions)) - copy(rt.config.StorageConfig.GraphDriverOptions, config.GraphDriverOptions) - - rt.config.StorageConfig.UIDMap = make([]idtools.IDMap, len(config.UIDMap)) - copy(rt.config.StorageConfig.UIDMap, config.UIDMap) - - 
rt.config.StorageConfig.GIDMap = make([]idtools.IDMap, len(config.GIDMap)) - copy(rt.config.StorageConfig.GIDMap, config.GIDMap) - - return nil - } -} - -// WithImageConfig uses the given configuration to set up image handling -// If this is not specified, the system default configuration will be used -// instead -func WithImageConfig(defaultTransport string, insecureRegistries, registries []string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return errRuntimeFinalized - } - - rt.config.ImageDefaultTransport = defaultTransport - - rt.config.InsecureRegistries = make([]string, len(insecureRegistries)) - copy(rt.config.InsecureRegistries, insecureRegistries) - - rt.config.Registries = make([]string, len(registries)) - copy(rt.config.Registries, registries) - - return nil - } -} - -// WithSignaturePolicy specifies the path of a file which decides how trust is -// managed for images we've pulled. -// If this is not specified, the system default configuration will be used -// instead -func WithSignaturePolicy(path string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return errRuntimeFinalized - } - - rt.config.SignaturePolicyPath = path - - return nil - } -} - -// WithOCIRuntime specifies an OCI runtime to use for running containers -func WithOCIRuntime(runtimePath string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return errRuntimeFinalized - } - - rt.config.RuntimePath = runtimePath - - return nil - } -} - -// WithConmonPath specifies the path to the conmon binary which manages the -// runtime -func WithConmonPath(path string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return errRuntimeFinalized - } - - rt.config.ConmonPath = path - - return nil - } -} - -// WithConmonEnv specifies the environment variable list for the conmon process -func WithConmonEnv(environment []string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return errRuntimeFinalized - } - - rt.config.ConmonEnvVars = make([]string, len(environment)) - copy(rt.config.ConmonEnvVars, environment) - - return nil - } -} - -// WithCgroupManager specifies the manager implementation name which is used to -// handle cgroups for containers -func WithCgroupManager(manager string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return errRuntimeFinalized - } - - rt.config.CgroupManager = manager - - return nil - } -} - -// WithSELinux enables SELinux on the container server -func WithSELinux() RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return errRuntimeFinalized - } - - rt.config.SelinuxEnabled = true - - return nil - } -} - -// WithPidsLimit specifies the maximum number of processes each container is -// restricted to -func WithPidsLimit(limit int64) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return errRuntimeFinalized - } - - rt.config.PidsLimit = limit - - return nil - } -} - -// Container Creation Options - -// WithRootFSFromPath uses the given path as a container's root filesystem -// No further setup is performed on this path -func WithRootFSFromPath(path string) CtrCreateOption { - return ctrNotImplemented -} - -// WithRootFSFromImage sets up a fresh root filesystem using the given image -// If useImageConfig is specified, image volumes, environment variables, and -// other configuration from the image will be added to the config -func WithRootFSFromImage(image string, useImageConfig bool) CtrCreateOption { - return ctrNotImplemented -} -
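The deleted options.go above is a straightforward instance of Go's functional-options pattern: each `With*` constructor returns a closure that refuses to run once the runtime has been finalized (`rt.valid`) and otherwise sets a single configuration field, and `NewRuntime` applies the closures in order over a copied default config. A minimal, self-contained sketch of the same pattern follows; the type and function names here are illustrative stand-ins, not CRI-O's actual API:

```go
package main

import "fmt"

// runtime stands in for libpod's Runtime; pidsLimit mirrors the kind of
// field a RuntimeOption such as WithPidsLimit would set.
type runtime struct {
	valid     bool
	pidsLimit int64
}

// runtimeOption mirrors the shape of RuntimeOption: a mutator that can fail.
type runtimeOption func(*runtime) error

// withPidsLimit refuses to mutate a finalized runtime, as the deleted
// options all do, and otherwise records the setting.
func withPidsLimit(limit int64) runtimeOption {
	return func(rt *runtime) error {
		if rt.valid {
			return fmt.Errorf("runtime has already been finalized")
		}
		rt.pidsLimit = limit
		return nil
	}
}

func newRuntime(options ...runtimeOption) (*runtime, error) {
	rt := &runtime{pidsLimit: 1024} // default, as in defaultRuntimeConfig
	for _, opt := range options {
		if err := opt(rt); err != nil {
			return nil, err
		}
	}
	rt.valid = true // finalized: options applied after this point would fail
	return rt, nil
}

func main() {
	rt, err := newRuntime(withPidsLimit(2048))
	if err != nil {
		panic(err)
	}
	fmt.Println(rt.pidsLimit) // 2048
}
```

The finalized check is what makes the pattern safe here: once the runtime is marked valid, configuration is immutable, so callers cannot reconfigure a live runtime through a stale option closure.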
-// WithSharedNamespaces sets a container to share namespaces with another -// container. If the from container belongs to a pod, the new container will -// be added to the pod. -// By default no namespaces are shared. To share a namespace, add the Namespace -// string constant to the map as a key -func WithSharedNamespaces(from *ctr.Container, namespaces map[string]string) CtrCreateOption { - return ctrNotImplemented -} - -// WithPod adds the container to a pod -func WithPod(pod *pod.Pod) CtrCreateOption { - return ctrNotImplemented -} - -// WithLabels adds labels to the container -func WithLabels(labels map[string]string) CtrCreateOption { - return ctrNotImplemented -} - -// WithAnnotations adds annotations to the container -func WithAnnotations(annotations map[string]string) CtrCreateOption { - return ctrNotImplemented -} - -// WithName sets the container's name -func WithName(name string) CtrCreateOption { - return ctrNotImplemented -} - -// WithStopSignal sets the signal that will be sent to stop the container -func WithStopSignal(signal uint) CtrCreateOption { - return ctrNotImplemented -} diff --git a/libpod/pod/pod.go b/libpod/pod/pod.go deleted file mode 100644 index b552fefe..00000000 --- a/libpod/pod/pod.go +++ /dev/null @@ -1,36 +0,0 @@ -package pod - -import ( - "github.com/kubernetes-incubator/cri-o/libpod/ctr" -) - -// Pod represents a group of containers that may share namespaces -type Pod struct { - // TODO populate -} - -// Start starts all containers within a pod that are not already running -func (p *Pod) Start() error { - return ctr.ErrNotImplemented -} - -// Stop stops all containers within a pod that are not already stopped -func (p *Pod) Stop() error { - return ctr.ErrNotImplemented -} - -// Kill sends a signal to all running containers within a pod -func (p *Pod) Kill(signal uint) error { - return ctr.ErrNotImplemented -} - -// GetContainers retrieves the containers in the pod -func (p *Pod) GetContainers() ([]*ctr.Container, error) { - return nil, ctr.ErrNotImplemented -} - -// Status gets the status of all containers in the pod -// TODO This should return a summary of the states of all containers in the pod -func (p *Pod) Status() error { - return ctr.ErrNotImplemented -} diff --git a/libpod/runtime.go b/libpod/runtime.go deleted file mode 100644 index 05abda40..00000000 --- a/libpod/runtime.go +++ /dev/null @@ -1,214 +0,0 @@ -package libpod - -import ( - "fmt" - "sync" - - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/libpod/ctr" - "github.com/kubernetes-incubator/cri-o/libpod/pod" - "github.com/kubernetes-incubator/cri-o/server/apparmor" - "github.com/kubernetes-incubator/cri-o/server/seccomp" - spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/ulule/deepcopier" -) - -// A RuntimeOption is a functional option which alters the Runtime created by -// NewRuntime -type RuntimeOption func(*Runtime) error - -// Runtime is the core libpod runtime -type Runtime struct { - config *RuntimeConfig - store storage.Store - imageContext *types.SystemContext - apparmorEnabled bool - seccompEnabled bool - valid bool - lock sync.RWMutex -} - -// RuntimeConfig contains configuration options used to set up the runtime -type RuntimeConfig struct { - StorageConfig storage.StoreOptions - ImageDefaultTransport string - InsecureRegistries []string - Registries []string - SignaturePolicyPath string - RuntimePath string - ConmonPath string - ConmonEnvVars []string - CgroupManager string -
SelinuxEnabled bool - PidsLimit int64 -} - -var ( - defaultRuntimeConfig = RuntimeConfig{ - // Leave this empty so containers/storage will use its defaults - StorageConfig: storage.StoreOptions{}, - ImageDefaultTransport: "docker://", - RuntimePath: "/usr/bin/runc", - ConmonPath: "/usr/local/libexec/crio/conmon", - ConmonEnvVars: []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - }, - CgroupManager: "cgroupfs", - SelinuxEnabled: false, - PidsLimit: 1024, - } -) - -// NewRuntime creates a new container runtime -// Options can be passed to override the default configuration for the runtime -func NewRuntime(options ...RuntimeOption) (*Runtime, error) { - runtime := new(Runtime) - runtime.config = new(RuntimeConfig) - - // Copy the default configuration - deepcopier.Copy(defaultRuntimeConfig).To(runtime.config) - - // Overwrite it with user-given configuration options - for _, opt := range options { - if err := opt(runtime); err != nil { - return nil, errors.Wrapf(err, "error configuring runtime") - } - } - - // Set up containers/storage - store, err := storage.GetStore(runtime.config.StorageConfig) - if err != nil { - return nil, err - } - runtime.store = store - - // Set up containers/image - runtime.imageContext = &types.SystemContext{ - SignaturePolicyPath: runtime.config.SignaturePolicyPath, - } - - runtime.seccompEnabled = seccomp.IsEnabled() - runtime.apparmorEnabled = apparmor.IsEnabled() - - // Mark the runtime as valid - ready to be used, cannot be modified - // further - runtime.valid = true - - return runtime, nil -} - -// GetConfig returns a copy of the configuration used by the runtime -func (r *Runtime) GetConfig() *RuntimeConfig { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil - } - - config := new(RuntimeConfig) - - // Copy so the caller won't be able to modify the actual config - deepcopier.Copy(r.config).To(config) - - return config -} - -// Shutdown shuts down the runtime and associated containers and storage -// If force is true, containers and mounted storage will be shut down before -// cleaning up; if force is false, an error will be returned if there are -// still containers running or mounted -func (r *Runtime) Shutdown(force bool) error { - r.lock.Lock() - defer r.lock.Unlock() - - if !r.valid { - return fmt.Errorf("runtime has already been shut down") - } - - _, err := r.store.Shutdown(force) - return err -} - -// Container API - -// A CtrCreateOption is a functional option which alters the Container created -// by NewContainer -type CtrCreateOption func(*ctr.Container) error - -// ContainerFilter is a function to determine whether a container is included -// in command output. Containers to be outputted are tested using the function. -// A true return will include the container, a false return will exclude it. 
-type ContainerFilter func(*ctr.Container) bool - -// NewContainer creates a new container from a given OCI config -func (r *Runtime) NewContainer(spec *spec.Spec, options ...CtrCreateOption) (*ctr.Container, error) { - return nil, ctr.ErrNotImplemented -} - -// RemoveContainer removes the given container -// If force is specified, the container will be stopped first -// Otherwise, RemoveContainer will return an error if the container is running -func (r *Runtime) RemoveContainer(c *ctr.Container, force bool) error { - return ctr.ErrNotImplemented -} - -// GetContainer retrieves a container by its ID -func (r *Runtime) GetContainer(id string) (*ctr.Container, error) { - return nil, ctr.ErrNotImplemented -} - -// LookupContainer looks up a container by its name or a partial ID -// If a partial ID is not unique, an error will be returned -func (r *Runtime) LookupContainer(idOrName string) (*ctr.Container, error) { - return nil, ctr.ErrNotImplemented -} - -// GetContainers retrieves all containers from the state -// Filters can be provided which will determine what containers are included in -// the output. Multiple filters are handled by ANDing their output, so only -// containers matching all filters are returned -func (r *Runtime) GetContainers(filters ...ContainerFilter) ([]*ctr.Container, error) { - return nil, ctr.ErrNotImplemented -} - -// Pod API - -// PodFilter is a function to determine whether a pod is included in command -// output. Pods to be outputted are tested using the function. A true return -// will include the pod, a false return will exclude it. -type PodFilter func(*pod.Pod) bool - -// NewPod makes a new, empty pod -func (r *Runtime) NewPod() (*pod.Pod, error) { - return nil, ctr.ErrNotImplemented -} - -// RemovePod removes a pod and all containers in it -// If force is specified, all containers in the pod will be stopped first -// Otherwise, RemovePod will return an error if any container in the pod is running -// Remove acts atomically, removing all containers or no containers -func (r *Runtime) RemovePod(p *pod.Pod, force bool) error { - return ctr.ErrNotImplemented -} - -// GetPod retrieves a pod by its ID -func (r *Runtime) GetPod(id string) (*pod.Pod, error) { - return nil, ctr.ErrNotImplemented -} - -// LookupPod retrieves a pod by its name or a partial ID -// If a partial ID is not unique, an error will be returned -func (r *Runtime) LookupPod(idOrName string) (*pod.Pod, error) { - return nil, ctr.ErrNotImplemented -} - -// GetPods retrieves all pods -// Filters can be provided which will determine which pods are included in the -// output. Multiple filters are handled by ANDing their output, so only pods -// matching all filters are returned -func (r *Runtime) GetPods(filters ...PodFilter) ([]*pod.Pod, error) { - return nil, ctr.ErrNotImplemented -} diff --git a/oci/container.go b/oci/container.go index 433e4c8c..c71152bf 100644 --- a/oci/container.go +++ b/oci/container.go @@ -22,31 +22,34 @@ const ( // Container represents a runtime container. 
type Container struct { - id string - name string - logPath string - labels fields.Set - annotations fields.Set - image string - sandbox string - netns ns.NetNS - terminal bool - stdin bool - stdinOnce bool - privileged bool - trusted bool - state *ContainerState - metadata *pb.ContainerMetadata - opLock sync.Locker + id string + name string + logPath string + labels fields.Set + annotations fields.Set + crioAnnotations fields.Set + image string + sandbox string + netns ns.NetNS + terminal bool + stdin bool + stdinOnce bool + privileged bool + trusted bool + state *ContainerState + metadata *pb.ContainerMetadata + opLock sync.Locker // this is the /var/run/storage/... directory, erased on reboot bundlePath string // this is the /var/lib/storage/... directory - dir string - stopSignal string - imageName string - imageRef string - volumes []ContainerVolume - mountPoint string + dir string + stopSignal string + imageName string + imageRef string + volumes []ContainerVolume + mountPoint string + seccompProfilePath string + spec *specs.Spec } // ContainerVolume is a bind mount for the container. @@ -68,35 +71,46 @@ type ContainerState struct { } // NewContainer creates a container object. -func NewContainer(id string, name string, bundlePath string, logPath string, netns ns.NetNS, labels map[string]string, annotations map[string]string, image string, imageName string, imageRef string, metadata *pb.ContainerMetadata, sandbox string, terminal bool, stdin bool, stdinOnce bool, privileged bool, trusted bool, dir string, created time.Time, stopSignal string) (*Container, error) { +func NewContainer(id string, name string, bundlePath string, logPath string, netns ns.NetNS, labels map[string]string, crioAnnotations map[string]string, annotations map[string]string, image string, imageName string, imageRef string, metadata *pb.ContainerMetadata, sandbox string, terminal bool, stdin bool, stdinOnce bool, privileged bool, trusted bool, dir string, created time.Time, stopSignal string) (*Container, error) { state := &ContainerState{} state.Created = created c := &Container{ - id: id, - name: name, - bundlePath: bundlePath, - logPath: logPath, - labels: labels, - sandbox: sandbox, - netns: netns, - terminal: terminal, - stdin: stdin, - stdinOnce: stdinOnce, - privileged: privileged, - trusted: trusted, - metadata: metadata, - annotations: annotations, - image: image, - imageName: imageName, - imageRef: imageRef, - dir: dir, - state: state, - stopSignal: stopSignal, - opLock: new(sync.Mutex), + id: id, + name: name, + bundlePath: bundlePath, + logPath: logPath, + labels: labels, + sandbox: sandbox, + netns: netns, + terminal: terminal, + stdin: stdin, + stdinOnce: stdinOnce, + privileged: privileged, + trusted: trusted, + metadata: metadata, + annotations: annotations, + crioAnnotations: crioAnnotations, + image: image, + imageName: imageName, + imageRef: imageRef, + dir: dir, + state: state, + stopSignal: stopSignal, + opLock: new(sync.Mutex), } return c, nil } +// SetSpec loads the OCI spec in the container struct +func (c *Container) SetSpec(s *specs.Spec) { + c.spec = s +} + +// Spec returns a copy of the spec for the container +func (c *Container) Spec() specs.Spec { + return *c.spec +} + // GetStopSignal returns the container's own stop signal configured from the // image configuration or the default one. 
func (c *Container) GetStopSignal() string { @@ -143,6 +157,16 @@ func (c *Container) ID() string { return c.id } +// SetSeccompProfilePath sets the seccomp profile path +func (c *Container) SetSeccompProfilePath(pp string) { + c.seccompProfilePath = pp +} + +// SeccompProfilePath returns the seccomp profile path +func (c *Container) SeccompProfilePath() string { + return c.seccompProfilePath +} + // BundlePath returns the bundlePath of the container. func (c *Container) BundlePath() string { return c.bundlePath @@ -163,6 +187,11 @@ func (c *Container) Annotations() map[string]string { return c.annotations } +// CrioAnnotations returns the crio annotations of the container. +func (c *Container) CrioAnnotations() map[string]string { + return c.crioAnnotations +} + // Image returns the image of the container. func (c *Container) Image() string { return c.image @@ -233,3 +262,10 @@ func (c *Container) SetMountPoint(mp string) { func (c *Container) MountPoint() string { return c.mountPoint } + +// SetState sets the container state +// +// XXX: DO NOT EVER USE THIS, THIS IS JUST USEFUL FOR MOCKING!!! +func (c *Container) SetState(state *ContainerState) { + c.state = state +} diff --git a/oci/memory_store.go b/oci/memory_store.go index 6223ce7f..3f0cac55 100644 --- a/oci/memory_store.go +++ b/oci/memory_store.go @@ -25,8 +25,9 @@ func (c *memoryStore) Add(id string, cont *Container) { // Get returns a container from the store by id. func (c *memoryStore) Get(id string) *Container { + var res *Container c.RLock() - res := c.s[id] + res = c.s[id] c.RUnlock() return res } diff --git a/oci/oci.go b/oci/oci.go index 7f03ef0f..658079a3 100644 --- a/oci/oci.go +++ b/oci/oci.go @@ -17,6 +17,7 @@ import ( "github.com/kubernetes-incubator/cri-o/utils" rspec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" + "golang.org/x/net/context" "golang.org/x/sys/unix" kwait "k8s.io/apimachinery/pkg/util/wait" ) @@ -24,21 +25,39 @@ import ( const ( // ContainerStateCreated represents the created state of a container ContainerStateCreated = "created" + // ContainerStatePaused represents the paused state of a container + ContainerStatePaused = "paused" // ContainerStateRunning represents the running state of a container ContainerStateRunning = "running" // ContainerStateStopped represents the stopped state of a container ContainerStateStopped = "stopped" // ContainerCreateTimeout represents the value of container creating timeout - ContainerCreateTimeout = 10 * time.Second + ContainerCreateTimeout = 240 * time.Second // CgroupfsCgroupsManager represents cgroupfs native cgroup manager CgroupfsCgroupsManager = "cgroupfs" // SystemdCgroupsManager represents systemd native cgroup manager SystemdCgroupsManager = "systemd" + // ContainerExitsDir is the location of container exit dirs + ContainerExitsDir = "/var/run/crio/exits" + // ContainerAttachSocketDir is the location for container attach sockets + ContainerAttachSocketDir = "/var/run/crio" + + // killContainerTimeout is the timeout that we wait for the container to + // be SIGKILLed.
+ killContainerTimeout = 2 * time.Minute ) // New creates a new Runtime with options provided -func New(runtimeTrustedPath string, runtimeUntrustedPath string, trustLevel string, conmonPath string, conmonEnv []string, cgroupManager string, containerExitsDir string) (*Runtime, error) { +func New(runtimeTrustedPath string, + runtimeUntrustedPath string, + trustLevel string, + conmonPath string, + conmonEnv []string, + cgroupManager string, + containerExitsDir string, + logSizeMax int64, + noPivot bool) (*Runtime, error) { r := &Runtime{ name: filepath.Base(runtimeTrustedPath), trustedPath: runtimeTrustedPath, @@ -48,6 +67,8 @@ func New(runtimeTrustedPath string, runtimeUntrustedPath string, trustLevel stri conmonEnv: conmonEnv, cgroupManager: cgroupManager, containerExitsDir: containerExitsDir, + logSizeMax: logSizeMax, + noPivot: noPivot, } return r, nil } @@ -62,6 +83,8 @@ type Runtime struct { conmonEnv []string cgroupManager string containerExitsDir string + logSizeMax int64 + noPivot bool } // syncInfo is used to return data from monitor process to daemon @@ -135,7 +158,7 @@ func getOCIVersion(name string, args ...string) (string, error) { } // CreateContainer creates a container. -func (r *Runtime) CreateContainer(c *Container, cgroupParent string) error { +func (r *Runtime) CreateContainer(c *Container, cgroupParent string) (err error) { var stderrBuf bytes.Buffer parentPipe, childPipe, err := newPipe() childStartPipe, parentStartPipe, err := newPipe() @@ -156,6 +179,13 @@ func (r *Runtime) CreateContainer(c *Container, cgroupParent string) error { args = append(args, "-p", filepath.Join(c.bundlePath, "pidfile")) args = append(args, "-l", c.logPath) args = append(args, "--exit-dir", r.containerExitsDir) + args = append(args, "--socket-dir-path", ContainerAttachSocketDir) + if r.logSizeMax >= 0 { + args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax)) + } + if r.noPivot { + args = append(args, "--no-pivot") + } if c.terminal { args = append(args, "-t") } else if c.stdin { @@ -202,11 +232,12 @@ func (r *Runtime) CreateContainer(c *Container, cgroupParent string) error { if err != nil { logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err) } else { - // XXX: this defer does nothing as the cgroup can't be deleted cause - // it contains the conmon pid in tasks - // we need to remove this defer and delete the cgroup once conmon exits - // maybe need a conmon monitor? - defer control.Delete() + // Here we should defer a crio-conmon- cgroup hierarchy deletion, but it will + // always fail as conmon's pid is still there. + // Fortunately, kubelet takes care of deleting this for us, so the leak will + // only happen in the corner case where one does a manual deletion of the container + // through e.g. runc. This should be handled by implementing a conmon monitoring + // routine that does the cgroup cleanup once conmon is terminated.
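Note how the new conmon flags are strictly opt-in: --log-size-max is emitted only for a non-negative limit and --no-pivot only when the runtime option is set, so existing invocations are unchanged. A hedged sketch of the argument assembly, with invented variable names:

```go
package main

import (
	"fmt"
	"os/exec"
)

// Sketch of the conditional conmon argument assembly; the optional flags
// mirror the new Runtime fields logSizeMax and noPivot.
func conmonArgs(containerID, logPath, exitsDir, attachDir string, logSizeMax int64, noPivot bool) []string {
	args := []string{"-c", containerID, "-l", logPath, "--exit-dir", exitsDir, "--socket-dir-path", attachDir}
	if logSizeMax >= 0 {
		args = append(args, "--log-size-max", fmt.Sprintf("%v", logSizeMax))
	}
	if noPivot {
		args = append(args, "--no-pivot")
	}
	return args
}

func main() {
	args := conmonArgs("abc123", "/var/log/ctr.log", "/var/run/crio/exits", "/var/run/crio", 8192, false)
	cmd := exec.Command("/usr/local/libexec/crio/conmon", args...) // path assumed for illustration
	fmt.Println(cmd.Args)
}
```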
if err := control.Add(cgroups.Process{Pid: cmd.Process.Pid}); err != nil { logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err) } @@ -226,6 +257,13 @@ func (r *Runtime) CreateContainer(c *Container, cgroupParent string) error { return err } + // We will delete all container resources if creation fails + defer func() { + if err != nil { + r.DeleteContainer(c) + } + }() + // Wait to get container pid from conmon type syncStruct struct { si *syncInfo @@ -249,13 +287,14 @@ func (r *Runtime) CreateContainer(c *Container, cgroupParent string) error { logrus.Debugf("Received container pid: %d", ss.si.Pid) if ss.si.Pid == -1 { if ss.si.Message != "" { - logrus.Debugf("Container creation error: %s", ss.si.Message) + logrus.Errorf("Container creation error: %s", ss.si.Message) return fmt.Errorf("container create failed: %s", ss.si.Message) } - logrus.Debugf("Container creation failed") + logrus.Errorf("Container creation failed") return fmt.Errorf("container create failed") } case <-time.After(ContainerCreateTimeout): + logrus.Errorf("Container creation timeout (%v)", ContainerCreateTimeout) return fmt.Errorf("create container timeout") } return nil @@ -320,9 +359,9 @@ func parseLog(log []byte) (stdout, stderr []byte) { continue } - // The format of log lines is "DATE pipe REST". - parts := bytes.SplitN(line, []byte{' '}, 3) - if len(parts) < 3 { + // The format of log lines is "DATE pipe LogTag REST". + parts := bytes.SplitN(line, []byte{' '}, 4) + if len(parts) < 4 { // Ignore the line if it's formatted incorrectly, but complain // about it so it can be debugged. logrus.Warnf("hit invalid log format: %q", string(line)) @@ -330,7 +369,15 @@ func parseLog(log []byte) (stdout, stderr []byte) { } pipe := string(parts[1]) - content := parts[2] + content := parts[3] + + linetype := string(parts[2]) + if linetype == "P" { + contentLen := len(content) + if content[contentLen-1] == '\n' { + content = content[:contentLen-1] + } + } switch pipe { case "stdout": @@ -376,15 +423,6 @@ func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp os.RemoveAll(logPath) }() - f, err := ioutil.TempFile("", "exec-process") - if err != nil { - return nil, ExecSyncError{ - ExitCode: -1, - Err: err, - } - } - defer os.RemoveAll(f.Name()) - var args []string args = append(args, "-c", c.id) args = append(args, "-r", r.Path(c)) @@ -398,28 +436,18 @@ func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp args = append(args, fmt.Sprintf("%d", timeout)) } args = append(args, "-l", logPath) + args = append(args, "--socket-dir-path", ContainerAttachSocketDir) - pspec := rspec.Process{ - Env: r.conmonEnv, - Args: command, - Cwd: "/", - } - processJSON, err := json.Marshal(pspec) + processFile, err := PrepareProcessExec(c, command, false) if err != nil { return nil, ExecSyncError{ ExitCode: -1, Err: err, } } + defer os.RemoveAll(processFile.Name()) - if err := ioutil.WriteFile(f.Name(), processJSON, 0644); err != nil { - return nil, ExecSyncError{ - ExitCode: -1, - Err: err, - } - } - - args = append(args, "--exec-process-spec", f.Name()) + args = append(args, "--exec-process-spec", processFile.Name()) cmd := exec.Command(r.conmonPath, args...) @@ -511,25 +539,26 @@ func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp }, nil } -// StopContainer stops a container. Timeout is given in seconds. 
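The parseLog change tracks the new conmon log format, which inserts a LogTag field between the pipe name and the payload; a tag of "P" marks a partial line whose trailing newline must be stripped so the next chunk can be appended cleanly. A runnable sketch of the per-line split (the length guard on content is mine):

```go
package main

import (
	"bytes"
	"fmt"
)

// splitLogLine handles the 4-field format "DATE pipe LogTag REST".
func splitLogLine(line []byte) (pipe string, content []byte, ok bool) {
	parts := bytes.SplitN(line, []byte{' '}, 4)
	if len(parts) < 4 {
		return "", nil, false // malformed line, caller should warn and skip
	}
	pipe = string(parts[1])
	content = parts[3]
	if string(parts[2]) == "P" && len(content) > 0 && content[len(content)-1] == '\n' {
		content = content[:len(content)-1] // partial line: drop newline, more is coming
	}
	return pipe, content, true
}

func main() {
	pipe, content, _ := splitLogLine([]byte("2017-10-05T12:00:00Z stdout P hello\n"))
	fmt.Printf("%s %q\n", pipe, content) // stdout "hello"
}
```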
-func (r *Runtime) StopContainer(c *Container, timeout int64) error { - c.opLock.Lock() - defer c.opLock.Unlock() +// UpdateContainer updates container resources +func (r *Runtime) UpdateContainer(c *Container, res *rspec.LinuxResources) error { + cmd := exec.Command(r.Path(c), "update", "--resources", "-", c.id) + var stdout bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + jsonResources, err := json.Marshal(res) + if err != nil { + return err + } + cmd.Stdin = bytes.NewReader(jsonResources) - // Check if the process is around before sending a signal - err := unix.Kill(c.state.Pid, 0) - if err == unix.ESRCH { - c.state.Finished = time.Now() - return nil + if err := cmd.Run(); err != nil { + return fmt.Errorf("updating resources for container %q failed: %v %v (%v)", c.id, stderr.String(), stdout.String(), err) } + return nil +} - if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", c.id, c.GetStopSignal()); err != nil { - return fmt.Errorf("failed to stop container %s, %v", c.id, err) - } - if timeout == -1 { - // default 10 seconds delay - timeout = 10 - } +func waitContainerStop(ctx context.Context, c *Container, timeout time.Duration) error { done := make(chan struct{}) // we could potentially re-use "done" channel to exit the loop on timeout // but we use another channel "chControl" so that we won't never incur in the @@ -557,7 +586,10 @@ func (r *Runtime) StopContainer(c *Container, timeout int64) error { select { case <-done: return nil - case <-time.After(time.Duration(timeout) * time.Second): + case <-ctx.Done(): + close(chControl) + return ctx.Err() + case <-time.After(timeout): close(chControl) err := unix.Kill(c.state.Pid, unix.SIGKILL) if err != nil && err != unix.ESRCH { @@ -566,10 +598,39 @@ func (r *Runtime) StopContainer(c *Container, timeout int64) error { } c.state.Finished = time.Now() - return nil } +// StopContainer stops a container. Timeout is given in seconds. +func (r *Runtime) StopContainer(ctx context.Context, c *Container, timeout int64) error { + c.opLock.Lock() + defer c.opLock.Unlock() + + // Check if the process is around before sending a signal + err := unix.Kill(c.state.Pid, 0) + if err == unix.ESRCH { + c.state.Finished = time.Now() + return nil + } + + if timeout > 0 { + if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", c.id, c.GetStopSignal()); err != nil { + return fmt.Errorf("failed to stop container %s, %v", c.id, err) + } + err = waitContainerStop(ctx, c, time.Duration(timeout)*time.Second) + if err == nil { + return nil + } + logrus.Warnf("Stop container %q timed out: %v", c.ID(), err) + } + + if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", "--all", c.id, "KILL"); err != nil { + return fmt.Errorf("failed to stop container %s, %v", c.id, err) + } + + return waitContainerStop(ctx, c, killContainerTimeout) +} + // DeleteContainer deletes a container. func (r *Runtime) DeleteContainer(c *Container) error { c.opLock.Lock() @@ -591,7 +652,7 @@ func (r *Runtime) SetStartFailed(c *Container, err error) { func (r *Runtime) UpdateStatus(c *Container) error { c.opLock.Lock() defer c.opLock.Unlock() - out, err := exec.Command(r.Path(c), "state", c.id).CombinedOutput() + out, err := exec.Command(r.Path(c), "state", c.id).Output() if err != nil { // there are many code paths that could lead to have a bad state in the // underlying runtime. 
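StopContainer now escalates in stages: deliver the container's configured stop signal, wait out the grace period, then `kill --all` with SIGKILL and wait again, bounded by killContainerTimeout and the caller's context. The waiting itself is the interesting part; a self-contained sketch of the poll-with-escalation pattern used by waitContainerStop (polling interval and helper name are mine):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// waitStop polls the pid with signal 0 until it disappears (ESRCH), while
// honoring both a timeout and context cancellation; on timeout it escalates
// to SIGKILL, mirroring the diff's waitContainerStop.
func waitStop(ctx context.Context, pid int, timeout time.Duration) error {
	done := make(chan struct{})
	chControl := make(chan struct{})
	go func() {
		defer close(done)
		for {
			select {
			case <-chControl:
				return
			default:
				if err := unix.Kill(pid, 0); err == unix.ESRCH {
					return // process is gone
				}
				time.Sleep(100 * time.Millisecond)
			}
		}
	}()
	select {
	case <-done:
		return nil
	case <-ctx.Done():
		close(chControl)
		return ctx.Err()
	case <-time.After(timeout):
		close(chControl)
		if err := unix.Kill(pid, unix.SIGKILL); err != nil && err != unix.ESRCH {
			return fmt.Errorf("failed to SIGKILL pid %d: %v", pid, err)
		}
		return nil
	}
}

func main() {
	// A pid that almost certainly does not exist returns promptly via ESRCH.
	fmt.Println(waitStop(context.Background(), 999999, time.Second))
}
```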
@@ -678,3 +739,43 @@ func (r *Runtime) RuntimeReady() (bool, error) { func (r *Runtime) NetworkReady() (bool, error) { return true, nil } + +// PauseContainer pauses a container. +func (r *Runtime) PauseContainer(c *Container) error { + c.opLock.Lock() + defer c.opLock.Unlock() + _, err := utils.ExecCmd(r.Path(c), "pause", c.id) + return err +} + +// UnpauseContainer unpauses a container. +func (r *Runtime) UnpauseContainer(c *Container) error { + c.opLock.Lock() + defer c.opLock.Unlock() + _, err := utils.ExecCmd(r.Path(c), "resume", c.id) + return err +} + +// PrepareProcessExec returns the path of the process.json used in runc exec -p. +// The caller is responsible for closing the returned *os.File if needed. +func PrepareProcessExec(c *Container, cmd []string, tty bool) (*os.File, error) { + f, err := ioutil.TempFile("", "exec-process-") + if err != nil { + return nil, err + } + + pspec := c.Spec().Process + pspec.Args = cmd + if tty { + pspec.Terminal = true + } + processJSON, err := json.Marshal(pspec) + if err != nil { + return nil, err + } + + if err := ioutil.WriteFile(f.Name(), processJSON, 0644); err != nil { + return nil, err + } + return f, nil +} diff --git a/pause/Makefile b/pause/Makefile index da24f7fe..f0951af7 100644 --- a/pause/Makefile +++ b/pause/Makefile @@ -5,9 +5,9 @@ override LIBS += override CFLAGS += -std=c99 -Os -Wall -Wextra -static pause: $(obj) - $(CC) -o $@ $^ $(CFLAGS) $(LIBS) - strip $@ + $(CC) -o ../bin/$@ $^ $(CFLAGS) $(LIBS) + strip ../bin/$@ .PHONY: clean clean: - rm -f $(obj) pause + rm -f $(obj) ../bin/pause diff --git a/pkg/annotations/annotations.go b/pkg/annotations/annotations.go index 80f943c2..9b5b1352 100644 --- a/pkg/annotations/annotations.go +++ b/pkg/annotations/annotations.go @@ -22,6 +22,9 @@ const ( // IP is the container ipv4 or ipv6 address IP = "io.kubernetes.cri-o.IP" + // SeccompProfilePath is the node seccomp profile path + SeccompProfilePath = "io.kubernetes.cri-o.SeccompProfilePath" + // Image is the container image ID annotation Image = "io.kubernetes.cri-o.Image" @@ -52,6 +55,9 @@ const ( // ResolvPath is the resolver configuration path annotation ResolvPath = "io.kubernetes.cri-o.ResolvPath" + // HostnamePath is the path to /etc/hostname bind mount annotation + HostnamePath = "io.kubernetes.cri-o.HostnamePath" + // SandboxID is the sandbox ID annotation SandboxID = "io.kubernetes.cri-o.SandboxID" diff --git a/pkg/storage/image.go b/pkg/storage/image.go index 32ca1513..5994d952 100644 --- a/pkg/storage/image.go +++ b/pkg/storage/image.go @@ -2,29 +2,43 @@ package storage import ( "errors" - "fmt" "net" - "path/filepath" - "regexp" + "path" "strings" "github.com/containers/image/copy" "github.com/containers/image/docker/reference" "github.com/containers/image/image" + "github.com/containers/image/manifest" "github.com/containers/image/signature" istorage "github.com/containers/image/storage" "github.com/containers/image/transports/alltransports" "github.com/containers/image/types" "github.com/containers/storage" - distreference "github.com/docker/distribution/reference" + digest "github.com/opencontainers/go-digest" +) + +const ( + minimumTruncatedIDLength = 3 +) + +var ( + // ErrCannotParseImageID is returned when we try to ResolveNames for an image ID + ErrCannotParseImageID = errors.New("cannot parse an image ID") + // ErrImageMultiplyTagged is returned when we try to remove an image that still has multiple names + ErrImageMultiplyTagged = errors.New("image still has multiple names applied") ) // ImageResult wraps a subset of
information about an image: its ID, its names, // and the size, if known, or nil if it isn't. type ImageResult struct { - ID string - Names []string - Size *uint64 + ID string + Name string + RepoTags []string + RepoDigests []string + Size *uint64 + Digest digest.Digest + ConfigDigest digest.Digest } type indexInfo struct { @@ -40,6 +54,11 @@ type imageService struct { registries []string } +// sizer knows its size. +type sizer interface { + Size() (int64, error) +} + // ImageServer wraps up various CRI-related activities into a reusable // implementation. type ImageServer interface { @@ -47,8 +66,14 @@ type ImageServer interface { ListImages(systemContext *types.SystemContext, filter string) ([]ImageResult, error) // ImageStatus returns status of an image which matches the filter. ImageStatus(systemContext *types.SystemContext, filter string) (*ImageResult, error) + // PrepareImage returns an Image where the config digest can be grabbed + // for further analysis. Call Close() on the resulting image. + PrepareImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.Image, error) // PullImage imports an image from the specified location. PullImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.ImageReference, error) + // UntagImage removes a name from the specified image, and if it was + // the only name the image had, removes the image. + UntagImage(systemContext *types.SystemContext, imageName string) error // RemoveImage deletes the specified image. RemoveImage(systemContext *types.SystemContext, imageName string) error // GetStore returns the reference to the storage library Store which @@ -78,6 +103,66 @@ func (svc *imageService) getRef(name string) (types.ImageReference, error) { return ref, nil } +func sortNamesByType(names []string) (bestName string, tags, digests []string) { + for _, name := range names { + if len(name) > 72 && name[len(name)-72:len(name)-64] == "@sha256:" { + digests = append(digests, name) + } else { + tags = append(tags, name) + } + } + if len(digests) > 0 { + bestName = digests[0] + } + if len(tags) > 0 { + bestName = tags[0] + } + return bestName, tags, digests +} + +func (svc *imageService) makeRepoDigests(knownRepoDigests, tags []string, imageID string) (imageDigest digest.Digest, repoDigests []string) { + // Look up the image's digest. + img, err := svc.store.Image(imageID) + if err != nil { + return "", knownRepoDigests + } + imageDigest = img.Digest + if imageDigest == "" { + imgDigest, err := svc.store.ImageBigDataDigest(imageID, storage.ImageDigestBigDataKey) + if err != nil || imgDigest == "" { + return "", knownRepoDigests + } + imageDigest = imgDigest + } + // If there are no names to convert to canonical references, we're done. + if len(tags) == 0 { + return imageDigest, knownRepoDigests + } + // We only want to supplement what's already explicitly in the list, so keep track of values + // that we already know. + digestMap := make(map[string]struct{}) + repoDigests = knownRepoDigests + for _, repoDigest := range knownRepoDigests { + digestMap[repoDigest] = struct{}{} + } + // For each tagged name, parse the name, and if we can extract a named reference, convert + // it into a canonical reference using the digest and add it to the list. 
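Before the canonical-reference loop that the comment above introduces, it helps to see the naming heuristic on its own: sortNamesByType treats a name whose last 72 characters are "@sha256:" followed by a 64-character hex digest as a repo digest, everything else as a tag, and prefers a tag as the display name. A runnable sketch:

```go
package main

import "fmt"

// classify mirrors sortNamesByType: "@sha256:" (8 chars) + 64 hex chars
// is 72 trailing characters, the signature of a digested reference.
func classify(names []string) (best string, tags, digests []string) {
	for _, name := range names {
		if len(name) > 72 && name[len(name)-72:len(name)-64] == "@sha256:" {
			digests = append(digests, name)
		} else {
			tags = append(tags, name)
		}
	}
	if len(digests) > 0 {
		best = digests[0]
	}
	if len(tags) > 0 {
		best = tags[0] // a tag wins over a digest as the display name
	}
	return best, tags, digests
}

func main() {
	best, tags, digests := classify([]string{
		"docker.io/library/nginx@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3",
		"docker.io/library/nginx:latest",
	})
	fmt.Println(best)                    // docker.io/library/nginx:latest
	fmt.Println(len(tags), len(digests)) // 1 1
}
```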
+ for _, tag := range tags { + if ref, err2 := reference.ParseAnyReference(tag); err2 == nil { + if name, ok := ref.(reference.Named); ok { + trimmed := reference.TrimNamed(name) + if imageRef, err3 := reference.WithDigest(trimmed, imageDigest); err3 == nil { + if _, ok := digestMap[imageRef.String()]; !ok { + repoDigests = append(repoDigests, imageRef.String()) + digestMap[imageRef.String()] = struct{}{} + } + } + } + } + } + return imageDigest, repoDigests +} + func (svc *imageService) ListImages(systemContext *types.SystemContext, filter string) ([]ImageResult, error) { results := []ImageResult{} if filter != "" { @@ -86,16 +171,26 @@ func (svc *imageService) ListImages(systemContext *types.SystemContext, filter s return nil, err } if image, err := istorage.Transport.GetStoreImage(svc.store, ref); err == nil { - img, err := ref.NewImage(systemContext) + img, err := ref.NewImageSource(systemContext) if err != nil { return nil, err } size := imageSize(img) + configDigest, err := imageConfigDigest(img, nil) img.Close() + if err != nil { + return nil, err + } + name, tags, digests := sortNamesByType(image.Names) + imageDigest, repoDigests := svc.makeRepoDigests(digests, tags, image.ID) results = append(results, ImageResult{ - ID: image.ID, - Names: image.Names, - Size: size, + ID: image.ID, + Name: name, + RepoTags: tags, + RepoDigests: repoDigests, + Size: size, + Digest: imageDigest, + ConfigDigest: configDigest, }) } } else { @@ -108,16 +203,26 @@ func (svc *imageService) ListImages(systemContext *types.SystemContext, filter s if err != nil { return nil, err } - img, err := ref.NewImage(systemContext) + img, err := ref.NewImageSource(systemContext) if err != nil { return nil, err } size := imageSize(img) + configDigest, err := imageConfigDigest(img, nil) img.Close() + if err != nil { + return nil, err + } + name, tags, digests := sortNamesByType(image.Names) + imageDigest, repoDigests := svc.makeRepoDigests(digests, tags, image.ID) results = append(results, ImageResult{ - ID: image.ID, - Names: image.Names, - Size: size, + ID: image.ID, + Name: name, + RepoTags: tags, + RepoDigests: repoDigests, + Size: size, + Digest: imageDigest, + ConfigDigest: configDigest, }) } } @@ -142,38 +247,68 @@ func (svc *imageService) ImageStatus(systemContext *types.SystemContext, nameOrI return nil, err } - img, err := ref.NewImage(systemContext) + img, err := ref.NewImageSource(systemContext) if err != nil { return nil, err } + defer img.Close() size := imageSize(img) - img.Close() + configDigest, err := imageConfigDigest(img, nil) + if err != nil { + return nil, err + } - return &ImageResult{ - ID: image.ID, - Names: image.Names, - Size: size, - }, nil + name, tags, digests := sortNamesByType(image.Names) + imageDigest, repoDigests := svc.makeRepoDigests(digests, tags, image.ID) + result := ImageResult{ + ID: image.ID, + Name: name, + RepoTags: tags, + RepoDigests: repoDigests, + Size: size, + Digest: imageDigest, + ConfigDigest: configDigest, + } + + return &result, nil } -func imageSize(img types.Image) *uint64 { - if sum, err := img.Size(); err == nil { - usum := uint64(sum) - return &usum +func imageSize(img types.ImageSource) *uint64 { + if s, ok := img.(sizer); ok { + if sum, err := s.Size(); err == nil { + usum := uint64(sum) + return &usum + } } return nil } +func imageConfigDigest(img types.ImageSource, instanceDigest *digest.Digest) (digest.Digest, error) { + manifestBytes, manifestType, err := img.GetManifest(instanceDigest) + if err != nil { + return "", err + } + imgManifest, err := 
manifest.FromBlob(manifestBytes, manifestType) + if err != nil { + return "", err + } + return imgManifest.ConfigInfo().Digest, nil +} + func (svc *imageService) CanPull(imageName string, options *copy.Options) (bool, error) { - srcRef, err := svc.prepareImage(imageName, options) + srcRef, err := svc.prepareReference(imageName, options) if err != nil { return false, err } - rawSource, err := srcRef.NewImageSource(options.SourceCtx, nil) + rawSource, err := srcRef.NewImageSource(options.SourceCtx) if err != nil { return false, err } - src, err := image.FromSource(rawSource) + sourceCtx := &types.SystemContext{} + if options.SourceCtx != nil { + sourceCtx = options.SourceCtx + } + src, err := image.FromSource(sourceCtx, rawSource) if err != nil { rawSource.Close() return false, err @@ -182,9 +317,9 @@ func (svc *imageService) CanPull(imageName string, options *copy.Options) (bool, return true, nil } -// prepareImage creates an image reference from an image string and set options +// prepareReference creates an image reference from an image string and sets options // for the source context -func (svc *imageService) prepareImage(imageName string, options *copy.Options) (types.ImageReference, error) { +func (svc *imageService) prepareReference(imageName string, options *copy.Options) (types.ImageReference, error) { if imageName == "" { return nil, storage.ErrNotAnImage } @@ -212,6 +347,18 @@ func (svc *imageService) prepareImage(imageName string, options *copy.Options) ( return srcRef, nil } +func (svc *imageService) PrepareImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.Image, error) { + if options == nil { + options = &copy.Options{} + } + + srcRef, err := svc.prepareReference(imageName, options) + if err != nil { + return nil, err + } + return srcRef.NewImage(systemContext) +} + func (svc *imageService) PullImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.ImageReference, error) { policy, err := signature.DefaultPolicy(systemContext) if err != nil { @@ -225,7 +372,7 @@ func (svc *imageService) PullImage(systemContext *types.SystemContext, imageName options = &copy.Options{} } - srcRef, err := svc.prepareImage(imageName, options) + srcRef, err := svc.prepareReference(imageName, options) if err != nil { return nil, err } @@ -251,6 +398,57 @@ func (svc *imageService) PullImage(systemContext *types.SystemContext, imageName return destRef, nil } +func (svc *imageService) UntagImage(systemContext *types.SystemContext, nameOrID string) error { + ref, err := alltransports.ParseImageName(nameOrID) + if err != nil { + ref2, err2 := istorage.Transport.ParseStoreReference(svc.store, "@"+nameOrID) + if err2 != nil { + ref3, err3 := istorage.Transport.ParseStoreReference(svc.store, nameOrID) + if err3 != nil { + return err + } + ref2 = ref3 + } + ref = ref2 + } + + img, err := istorage.Transport.GetStoreImage(svc.store, ref) + if err != nil { + return err + } + + if !strings.HasPrefix(img.ID, nameOrID) { + namedRef, err := svc.prepareReference(nameOrID, &copy.Options{}) + if err != nil { + return err + } + + name := nameOrID + if namedRef.DockerReference() != nil { + name = namedRef.DockerReference().Name() + if tagged, ok := namedRef.DockerReference().(reference.NamedTagged); ok { + name = name + ":" + tagged.Tag() + } + if canonical, ok := namedRef.DockerReference().(reference.Canonical); ok { + name = name + "@" + canonical.Digest().String() + } + } + + prunedNames := make([]string, 0, len(img.Names)) + for _, imgName := range
img.Names { + if imgName != name && imgName != nameOrID { + prunedNames = append(prunedNames, imgName) + } + } + + if len(prunedNames) > 0 { + return svc.store.SetNames(img.ID, prunedNames) + } + } + + return ref.DeleteImage(systemContext) +} + func (svc *imageService) RemoveImage(systemContext *types.SystemContext, nameOrID string) error { ref, err := alltransports.ParseImageName(nameOrID) if err != nil { @@ -307,113 +505,35 @@ func (svc *imageService) isSecureIndex(indexName string) bool { return true } -func isValidHostname(hostname string) bool { - return hostname != "" && !strings.Contains(hostname, "/") && - (strings.Contains(hostname, ".") || - strings.Contains(hostname, ":") || hostname == "localhost") -} - -func isReferenceFullyQualified(reposName reference.Named) bool { - indexName, _, _ := splitReposName(reposName) - return indexName != "" -} - -const ( - // defaultHostname is the default built-in hostname - defaultHostname = "docker.io" - // legacyDefaultHostname is automatically converted to DefaultHostname - legacyDefaultHostname = "index.docker.io" - // defaultRepoPrefix is the prefix used for default repositories in default host - defaultRepoPrefix = "library/" -) - -// splitReposName breaks a reposName into an index name and remote name -func splitReposName(reposName reference.Named) (indexName string, remoteName reference.Named, err error) { - var remoteNameStr string - indexName, remoteNameStr = distreference.SplitHostname(reposName) - if !isValidHostname(indexName) { - // This is a Docker Index repos (ex: samalba/hipache or ubuntu) - // 'docker.io' - indexName = "" - remoteName = reposName - } else { - remoteName, err = withName(remoteNameStr) - } - return -} - -func validateName(name string) error { - if err := validateID(strings.TrimPrefix(name, defaultHostname+"/")); err == nil { - return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) - } - return nil -} - -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) - -// validateID checks whether an ID string is a valid image ID. -func validateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} - -// withName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func withName(name string) (reference.Named, error) { - name, err := normalize(name) - if err != nil { - return nil, err - } - if err := validateName(name); err != nil { - return nil, err - } - r, err := distreference.WithName(name) - return r, err -} - -// splitHostname splits a repository name to hostname and remotename string. -// If no valid hostname is found, empty string will be returned as a resulting -// hostname. Repository name needs to be already validated before. 
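UntagImage above deletes the image only when the matched name was its last one; otherwise it just rewrites the name list. The pruning step in isolation:

```go
package main

import "fmt"

// pruneNames filters out the resolved name (and the raw input) from an
// image's names; deletion only happens when the list would become empty.
func pruneNames(names []string, resolved, raw string) (kept []string, deleteImage bool) {
	kept = make([]string, 0, len(names))
	for _, n := range names {
		if n != resolved && n != raw {
			kept = append(kept, n)
		}
	}
	return kept, len(kept) == 0
}

func main() {
	kept, del := pruneNames(
		[]string{"docker.io/library/nginx:latest", "docker.io/library/nginx:1.13"},
		"docker.io/library/nginx:latest", "nginx:latest")
	fmt.Println(kept, del) // [docker.io/library/nginx:1.13] false
}
```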
-func splitHostname(name string) (hostname, remoteName string) { +func splitDockerDomain(name string) (domain, remainder string) { i := strings.IndexRune(name, '/') if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - hostname, remoteName = "", name + domain, remainder = "", name } else { - hostname, remoteName = name[:i], name[i+1:] - } - if hostname == legacyDefaultHostname { - hostname = defaultHostname - } - if hostname == defaultHostname && !strings.ContainsRune(remoteName, '/') { - remoteName = defaultRepoPrefix + remoteName + domain, remainder = name[:i], name[i+1:] } return } -// normalize returns a repository name in its normalized form, meaning it -// will contain library/ prefix for official images. -func normalize(name string) (string, error) { - host, remoteName := splitHostname(name) - if strings.ToLower(remoteName) != remoteName { - return "", errors.New("invalid reference format: repository name must be lowercase") - } - if host == defaultHostname { - if strings.HasPrefix(remoteName, defaultRepoPrefix) { - remoteName = strings.TrimPrefix(remoteName, defaultRepoPrefix) - } - return host + "/" + remoteName, nil - } - return name, nil -} - func (svc *imageService) ResolveNames(imageName string) ([]string, error) { - r, err := reference.ParseNormalizedNamed(imageName) + // _Maybe_ it's a truncated image ID. Don't prepend a registry name, then. + if len(imageName) >= minimumTruncatedIDLength && svc.store != nil { + if img, err := svc.store.Image(imageName); err == nil && img != nil && strings.HasPrefix(img.ID, imageName) { + // It's a truncated version of the ID of an image that's present in local storage; + // we need to expand it. + return []string{img.ID}, nil + } + } + // This is to prevent any image ID from going through this routine + _, err := reference.ParseNormalizedNamed(imageName) if err != nil { + if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") { + return nil, ErrCannotParseImageID + } return nil, err } - if isReferenceFullyQualified(r) { + domain, remainder := splitDockerDomain(imageName) + if domain != "" { // this means the image is already fully qualified return []string{imageName}, nil } @@ -425,10 +545,13 @@ func (svc *imageService) ResolveNames(imageName string) ([]string, error) { // this means we got an image in the form of "busybox" // we need to use additional registries... // normalize the unqualified image to be domain/repo/image... - _, rest := splitDomain(r.Name()) images := []string{} for _, r := range svc.registries { - images = append(images, filepath.Join(r, rest)) + rem := remainder + if r == "docker.io" && !strings.ContainsRune(remainder, '/') { + rem = "library/" + rem + } + images = append(images, path.Join(r, rem)) } return images, nil } diff --git a/pkg/storage/image_regexp.go b/pkg/storage/image_regexp.go deleted file mode 100644 index 96de6488..00000000 --- a/pkg/storage/image_regexp.go +++ /dev/null @@ -1,125 +0,0 @@ -package storage - -// This is a fork of docker/distribution code to be used when manipulating image -// references. -// DO NOT EDIT THIS FILE. - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes.
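splitDockerDomain replaces the forked docker/distribution helpers being deleted below: a first path component counts as a registry only if it contains a dot or colon or is exactly "localhost", and ResolveNames then expands unqualified names across the configured registries, adding "library/" for docker.io short names. A runnable sketch of both halves:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// splitDockerDomain mirrors the new helper above.
func splitDockerDomain(name string) (domain, remainder string) {
	i := strings.IndexRune(name, '/')
	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
		return "", name
	}
	return name[:i], name[i+1:]
}

// resolve sketches the unqualified-name expansion from ResolveNames.
func resolve(name string, registries []string) []string {
	if domain, _ := splitDockerDomain(name); domain != "" {
		return []string{name} // already fully qualified
	}
	images := []string{}
	for _, r := range registries {
		rem := name
		if r == "docker.io" && !strings.ContainsRune(name, '/') {
			rem = "library/" + rem // official-library shorthand
		}
		images = append(images, path.Join(r, rem))
	}
	return images
}

func main() {
	fmt.Println(resolve("nginx:latest", []string{"registry.access.redhat.com", "docker.io"}))
	// [registry.access.redhat.com/nginx:latest docker.io/library/nginx:latest]
}
```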
- separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by domainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // domainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - domainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(domainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(domainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. 
-func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/pkg/storage/image_test.go b/pkg/storage/image_test.go new file mode 100644 index 00000000..82807206 --- /dev/null +++ b/pkg/storage/image_test.go @@ -0,0 +1,84 @@ +package storage + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestResolveNames(t *testing.T) { + cases := []struct { + name string + additionalRegistries []string + imageName string + expected []string + err bool + errContains string + }{ + { + name: "test unqualified images get correctly qualified in order and correct tag", + additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"}, + imageName: "openshift3/ose-deployer:sometag", + expected: []string{"testregistry.com/openshift3/ose-deployer:sometag", "registry.access.redhat.com/openshift3/ose-deployer:sometag", "docker.io/openshift3/ose-deployer:sometag"}, + err: false, + }, + { + name: "test unqualified images get correctly qualified in order and correct digest", + additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"}, + imageName: "openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3", + expected: []string{"testregistry.com/openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3", "registry.access.redhat.com/openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3", "docker.io/openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3"}, + err: false, + }, + { + name: "test unqualified images get correctly qualified in order", + additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"}, + imageName: "openshift3/ose-deployer:latest", + expected: []string{"testregistry.com/openshift3/ose-deployer:latest", "registry.access.redhat.com/openshift3/ose-deployer:latest", "docker.io/openshift3/ose-deployer:latest"}, + err: false, + }, + { + name: "test unqualified images get correctly qualified from official library", + additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"}, + imageName: "nginx:latest", + expected: []string{"testregistry.com/nginx:latest", "registry.access.redhat.com/nginx:latest", "docker.io/library/nginx:latest"}, + err: false, + }, + { + name: "test qualified images returns just qualified", + additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"}, + imageName: "mypersonalregistry.com/nginx:latest", + expected: []string{"mypersonalregistry.com/nginx:latest"}, + err: false, + }, + { + name: "test we don't have names w/o registries", + imageName: "openshift3/ose-deployer:latest", + err: true, + }, + { + name: "test we cannot resolve names from an image ID", + imageName: "6ad733544a6317992a6fac4eb19fe1df577d4dec7529efec28a5bd0edad0fd30", + err: true, + errContains: "cannot parse an image ID", + }, + } + for _, c := range cases { + svc := &imageService{ + 
registries: c.additionalRegistries, + } + names, err := svc.ResolveNames(c.imageName) + if !c.err { + require.NoError(t, err, c.name) + if !reflect.DeepEqual(names, c.expected) { + t.Fatalf("Expected: %v, Got: %v: %q", c.expected, names, c.name) + } + } else { + require.Error(t, err, c.name) + if c.errContains != "" { + assert.Contains(t, err.Error(), c.errContains) + } + } + } +} diff --git a/seccomp.json b/seccomp.json index b9a4564d..19fadb4b 100644 --- a/seccomp.json +++ b/seccomp.json @@ -55,7 +55,7 @@ "accept", "accept4", "access", - "alarm", + "adjtimex", "alarm", "bind", "brk", @@ -223,10 +223,12 @@ "prctl", "pread64", "preadv", + "preadv2", "prlimit64", "pselect6", "pwrite64", "pwritev", + "pwritev2", "read", "readahead", "readlink", @@ -403,6 +405,40 @@ "includes": {}, "excludes": {} }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131072, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131080, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, { "names": [ "personality" @@ -422,6 +458,23 @@ }, { "names": [ + "sync_file_range2" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "ppc64le" + ] + }, + "excludes": {} + }, + { + "names": [ + "arm_fadvise64_64", + "arm_sync_file_range", + "sync_file_range2", "breakpoint", "cacheflush", "set_tls" @@ -508,6 +561,7 @@ "mount", "name_to_handle_at", "perf_event_open", + "quotactl", "setdomainname", "sethostname", "setns", @@ -671,7 +725,7 @@ "names": [ "settimeofday", "stime", - "adjtimex" + "clock_settime" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -698,4 +752,4 @@ "excludes": {} } ] -} \ No newline at end of file +} diff --git a/server/apparmor/apparmor_common.go b/server/apparmor/apparmor_common.go index 6366a66e..76c640b8 100644 --- a/server/apparmor/apparmor_common.go +++ b/server/apparmor/apparmor_common.go @@ -3,10 +3,6 @@ package apparmor const ( // DefaultApparmorProfile is the name of default apparmor profile name. DefaultApparmorProfile = "crio-default" - - // ContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container profile. - ContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/" - // ProfileRuntimeDefault is he profile specifying the runtime default. ProfileRuntimeDefault = "runtime/default" // ProfileNamePrefix is the prefix for specifying profiles loaded on the node. diff --git a/server/apparmor/apparmor_supported.go b/server/apparmor/apparmor_supported.go index d765c9de..49360470 100644 --- a/server/apparmor/apparmor_supported.go +++ b/server/apparmor/apparmor_supported.go @@ -11,7 +11,7 @@ import ( "path" "strings" - "github.com/docker/docker/utils/templates" + "github.com/docker/docker/pkg/templates" "github.com/opencontainers/runc/libcontainer/apparmor" ) @@ -34,7 +34,7 @@ type profileData struct { // EnsureDefaultApparmorProfile loads default apparmor profile, if it is not loaded.
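The two new personality(2) filters in seccomp.json allow the argument values 131072 and 131080. Read against linux/personality.h these look like UNAME26 and UNAME26|PER_LINUX32 (my decoding; the profile itself leaves the comment fields empty):

```go
package main

import "fmt"

// Hedged decoding of the two allowed personality(2) values; the flag names
// are assumptions based on linux/personality.h, not taken from this PR.
const (
	perLinux32 = 0x0008  // PER_LINUX32
	uname26    = 0x20000 // UNAME26
)

func main() {
	fmt.Println(uname26 == 131072)              // true
	fmt.Println((uname26 | perLinux32) == 131080) // true
}
```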
func EnsureDefaultApparmorProfile() error { - if apparmor.IsEnabled() { + if IsEnabled() { loaded, err := IsLoaded(DefaultApparmorProfile) if err != nil { return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", DefaultApparmorProfile, err) @@ -59,12 +59,6 @@ func IsEnabled() bool { return apparmor.IsEnabled() } -// GetProfileNameFromPodAnnotations gets the name of the profile to use with container from -// pod annotations -func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string { - return annotations[ContainerAnnotationKeyPrefix+containerName] -} - // InstallDefault generates a default profile in a temp directory determined by // os.TempDir(), then loads the profile into the kernel using 'apparmor_parser'. func InstallDefault(name string) error { diff --git a/server/apparmor/apparmor_unsupported.go b/server/apparmor/apparmor_unsupported.go index fbd1d87a..20cd15d2 100644 --- a/server/apparmor/apparmor_unsupported.go +++ b/server/apparmor/apparmor_unsupported.go @@ -11,8 +11,3 @@ func IsEnabled() bool { func EnsureDefaultApparmorProfile() error { return nil } - -// GetProfileNameFromPodAnnotations dose nothing, when build without apparmor build tag. -func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string { - return "" -} diff --git a/server/config.go b/server/config.go index 6c2d26cd..541bfdc0 100644 --- a/server/config.go +++ b/server/config.go @@ -5,7 +5,7 @@ import ( "io/ioutil" "github.com/BurntSushi/toml" - "github.com/kubernetes-incubator/cri-o/libkpod" + "github.com/kubernetes-incubator/cri-o/lib" ) //CrioConfigPath is the default location for the conf file @@ -14,7 +14,7 @@ const CrioConfigPath = "/etc/crio/crio.conf" // Config represents the entire set of configuration values that can be set for // the server. This is intended to be loaded from a toml-encoded config file. type Config struct { - libkpod.Config + lib.Config APIConfig } @@ -37,11 +37,11 @@ type APIConfig struct { // conversions. type tomlConfig struct { Crio struct { - libkpod.RootConfig - API struct{ APIConfig } `toml:"api"` - Runtime struct{ libkpod.RuntimeConfig } `toml:"runtime"` - Image struct{ libkpod.ImageConfig } `toml:"image"` - Network struct{ libkpod.NetworkConfig } `toml:"network"` + lib.RootConfig + API struct{ APIConfig } `toml:"api"` + Runtime struct{ lib.RuntimeConfig } `toml:"runtime"` + Image struct{ lib.ImageConfig } `toml:"image"` + Network struct{ lib.NetworkConfig } `toml:"network"` } `toml:"crio"` } @@ -102,9 +102,9 @@ func (c *Config) ToFile(path string) error { // DefaultConfig returns the default configuration for crio. 
func DefaultConfig() *Config { return &Config{ - Config: *libkpod.DefaultConfig(), + Config: *lib.DefaultConfig(), APIConfig: APIConfig{ - Listen: "/var/run/crio.sock", + Listen: "/var/run/crio/crio.sock", StreamAddress: "", StreamPort: "10010", }, diff --git a/server/config_test.go b/server/config_test.go new file mode 100644 index 00000000..9d8ddf04 --- /dev/null +++ b/server/config_test.go @@ -0,0 +1,89 @@ +package server + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/kubernetes-incubator/cri-o/lib" +) + +const fixturePath = "fixtures/crio.conf" + +func must(t *testing.T, err error) { + if err != nil { + t.Error(err) + } +} + +func assertAllFieldsEquality(t *testing.T, c Config) { + testCases := []struct { + fieldValue, expected interface{} + }{ + {c.RootConfig.Root, "/var/lib/containers/storage"}, + {c.RootConfig.RunRoot, "/var/run/containers/storage"}, + {c.RootConfig.Storage, "overlay"}, + {c.RootConfig.StorageOptions[0], "overlay.override_kernel_check=1"}, + + {c.APIConfig.Listen, "/var/run/crio.sock"}, + {c.APIConfig.StreamPort, "10010"}, + {c.APIConfig.StreamAddress, "localhost"}, + + {c.RuntimeConfig.Runtime, "/usr/local/bin/runc"}, + {c.RuntimeConfig.RuntimeUntrustedWorkload, "untrusted"}, + {c.RuntimeConfig.DefaultWorkloadTrust, "trusted"}, + {c.RuntimeConfig.Conmon, "/usr/local/libexec/crio/conmon"}, + {c.RuntimeConfig.ConmonEnv[0], "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + {c.RuntimeConfig.SELinux, true}, + {c.RuntimeConfig.SeccompProfile, "/etc/crio/seccomp.json"}, + {c.RuntimeConfig.ApparmorProfile, "crio-default"}, + {c.RuntimeConfig.CgroupManager, "cgroupfs"}, + {c.RuntimeConfig.PidsLimit, int64(1024)}, + + {c.ImageConfig.DefaultTransport, "docker://"}, + {c.ImageConfig.PauseImage, "kubernetes/pause"}, + {c.ImageConfig.PauseCommand, "/pause"}, + {c.ImageConfig.SignaturePolicyPath, "/tmp"}, + {c.ImageConfig.ImageVolumes, lib.ImageVolumesType("mkdir")}, + {c.ImageConfig.InsecureRegistries[0], "insecure-registry:1234"}, + {c.ImageConfig.Registries[0], "registry:4321"}, + + {c.NetworkConfig.NetworkDir, "/etc/cni/net.d/"}, + {c.NetworkConfig.PluginDir, "/opt/cni/bin/"}, + } + for _, tc := range testCases { + if tc.fieldValue != tc.expected { + t.Errorf(`Expecting: "%s", got: "%s"`, tc.expected, tc.fieldValue) + } + } +} + +func TestUpdateFromFile(t *testing.T) { + c := Config{} + + must(t, c.UpdateFromFile(fixturePath)) + + assertAllFieldsEquality(t, c) +} + +func TestToFile(t *testing.T) { + configFromFixture := Config{} + + must(t, configFromFixture.UpdateFromFile(fixturePath)) + + f, err := ioutil.TempFile("", "crio.conf") + if err != nil { + t.Error(err) + } + defer os.Remove(f.Name()) + + must(t, configFromFixture.ToFile(f.Name())) + + writtenConfig := Config{} + err = writtenConfig.UpdateFromFile(f.Name()) + if err != nil { + t.Fatal(err) + } + + assertAllFieldsEquality(t, writtenConfig) +} diff --git a/server/container_attach.go b/server/container_attach.go index 2d2fe203..ec9bedab 100644 --- a/server/container_attach.go +++ b/server/container_attach.go @@ -6,6 +6,7 @@ import ( "net" "os" "path/filepath" + "time" "github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/utils" @@ -25,10 +26,15 @@ const ( ) // Attach prepares a streaming endpoint to attach to a running container. 
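The fixture-driven test above implies a fixtures/crio.conf along these lines. The key names are reconstructed from the asserted fields and crio.conf conventions, so treat this as a sketch of the fixture rather than its verbatim contents:

```toml
[crio]
root = "/var/lib/containers/storage"
runroot = "/var/run/containers/storage"
storage_driver = "overlay"
storage_option = ["overlay.override_kernel_check=1"]

[crio.api]
listen = "/var/run/crio.sock"
stream_address = "localhost"
stream_port = "10010"

[crio.runtime]
runtime = "/usr/local/bin/runc"
runtime_untrusted_workload = "untrusted"
default_workload_trust = "trusted"
conmon = "/usr/local/libexec/crio/conmon"
conmon_env = ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
selinux = true
seccomp_profile = "/etc/crio/seccomp.json"
apparmor_profile = "crio-default"
cgroup_manager = "cgroupfs"
pids_limit = 1024

[crio.image]
default_transport = "docker://"
pause_image = "kubernetes/pause"
pause_command = "/pause"
signature_policy = "/tmp"
image_volumes = "mkdir"
insecure_registries = ["insecure-registry:1234"]
registries = ["registry:4321"]

[crio.network]
network_dir = "/etc/cni/net.d/"
plugin_dir = "/opt/cni/bin/"
```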
-func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (*pb.AttachResponse, error) { +func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (resp *pb.AttachResponse, err error) { + const operation = "attach" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("AttachRequest %+v", req) - resp, err := s.GetAttach(req) + resp, err = s.GetAttach(req) if err != nil { return nil, fmt.Errorf("unable to prepare attach endpoint") } @@ -67,7 +73,7 @@ func (ss streamService) Attach(containerID string, inputStream io.Reader, output } }) - attachSocketPath := filepath.Join("/var/run/crio", c.ID(), "attach") + attachSocketPath := filepath.Join(oci.ContainerAttachSocketDir, c.ID(), "attach") conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: attachSocketPath, Net: "unixpacket"}) if err != nil { return fmt.Errorf("failed to connect to container %s attach socket: %v", c.ID(), err) diff --git a/server/container_create.go b/server/container_create.go index c90f2baa..a4652cf3 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -5,18 +5,20 @@ import ( "errors" "fmt" "io" + "io/ioutil" "os" "path/filepath" "regexp" + "sort" "strconv" "strings" "time" - "github.com/docker/distribution/reference" + dockermounts "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/symlink" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/libkpod/sandbox" + "github.com/kubernetes-incubator/cri-o/lib" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" "github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/pkg/annotations" "github.com/kubernetes-incubator/cri-o/pkg/storage" @@ -38,6 +40,7 @@ import ( const ( seccompUnconfined = "unconfined" seccompRuntimeDefault = "runtime/default" + seccompDockerDefault = "docker/default" seccompLocalhostPrefix = "localhost/" scopePrefix = "crio" @@ -45,36 +48,96 @@ const ( defaultSystemdParent = "system.slice" ) -func addOCIBindMounts(sb *sandbox.Sandbox, containerConfig *pb.ContainerConfig, specgen *generate.Generator) ([]oci.ContainerVolume, error) { +type orderedMounts []rspec.Mount + +// Len returns the number of mounts. Used in sorting. +func (m orderedMounts) Len() int { + return len(m) +} + +// Less returns true if the number of parts (a/b/c would be 3 parts) in the +// mount indexed by parameter 1 is less than that of the mount indexed by +// parameter 2. Used in sorting. +func (m orderedMounts) Less(i, j int) bool { + return m.parts(i) < m.parts(j) +} + +// Swap swaps two items in an array of mounts. Used in sorting +func (m orderedMounts) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +// parts returns the number of parts in the destination of a mount. Used in sorting. 
+func (m orderedMounts) parts(i int) int { + return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) +} + +func addOCIBindMounts(mountLabel string, containerConfig *pb.ContainerConfig, specgen *generate.Generator) ([]oci.ContainerVolume, []rspec.Mount, error) { volumes := []oci.ContainerVolume{} + ociMounts := []rspec.Mount{} mounts := containerConfig.GetMounts() for _, mount := range mounts { dest := mount.ContainerPath if dest == "" { - return nil, fmt.Errorf("Mount.ContainerPath is empty") + return nil, nil, fmt.Errorf("Mount.ContainerPath is empty") } src := mount.HostPath if src == "" { - return nil, fmt.Errorf("Mount.HostPath is empty") + return nil, nil, fmt.Errorf("Mount.HostPath is empty") } if _, err := os.Stat(src); err != nil && os.IsNotExist(err) { if err1 := os.MkdirAll(src, 0644); err1 != nil { - return nil, fmt.Errorf("Failed to mkdir %s: %s", src, err) + return nil, nil, fmt.Errorf("Failed to mkdir %s: %s", src, err) } } + src, err := resolveSymbolicLink(src) + if err != nil { + return nil, nil, fmt.Errorf("failed to resolve symlink %q: %v", src, err) + } + options := []string{"rw"} if mount.Readonly { options = []string{"ro"} } - options = append(options, []string{"rbind", "rprivate"}...) + options = append(options, "rbind") + + // mount propagation + mountInfos, err := dockermounts.GetMounts() + if err != nil { + return nil, nil, err + } + switch mount.GetPropagation() { + case pb.MountPropagation_PROPAGATION_PRIVATE: + options = append(options, "rprivate") + // Since default root propagation in runc is rprivate ignore + // setting the root propagation + case pb.MountPropagation_PROPAGATION_BIDIRECTIONAL: + if err := ensureShared(src, mountInfos); err != nil { + return nil, nil, err + } + options = append(options, "rshared") + specgen.SetLinuxRootPropagation("rshared") + case pb.MountPropagation_PROPAGATION_HOST_TO_CONTAINER: + if err := ensureSharedOrSlave(src, mountInfos); err != nil { + return nil, nil, err + } + options = append(options, "rslave") + if specgen.Spec().Linux.RootfsPropagation != "rshared" && + specgen.Spec().Linux.RootfsPropagation != "rslave" { + specgen.SetLinuxRootPropagation("rslave") + } + default: + logrus.Warnf("Unknown propagation mode for hostPath %q", mount.HostPath) + options = append(options, "rprivate") + } if mount.SelinuxRelabel { // Need a way in kubernetes to determine if the volume is shared or private - if err := label.Relabel(src, sb.MountLabel(), true); err != nil && err != unix.ENOTSUP { - return nil, fmt.Errorf("relabel failed %s: %v", src, err) + if err := label.Relabel(src, mountLabel, true); err != nil && err != unix.ENOTSUP { + return nil, nil, fmt.Errorf("relabel failed %s: %v", src, err) } } @@ -84,45 +147,123 @@ func addOCIBindMounts(sb *sandbox.Sandbox, containerConfig *pb.ContainerConfig, Readonly: mount.Readonly, }) - specgen.AddBindMount(src, dest, options) + ociMounts = append(ociMounts, rspec.Mount{ + Source: src, + Destination: dest, + Options: options, + }) } - return volumes, nil + return volumes, ociMounts, nil } -func addImageVolumes(rootfs string, s *Server, containerInfo *storage.ContainerInfo, specgen *generate.Generator, mountLabel string) error { +// Ensure mount point on which path is mounted, is shared. +func ensureShared(path string, mountInfos []*dockermounts.Info) error { + sourceMount, optionalOpts, err := getSourceMount(path, mountInfos) + if err != nil { + return err + } + + // Make sure source mount point is shared. 
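The propagation switch above maps the three CRI modes onto OCI bind options, validating the host source first (ensureShared for bidirectional, ensureSharedOrSlave for host-to-container) and widening the rootfs propagation when needed. The mapping in isolation, with a stand-in enum for the protobuf values:

```go
package main

import "fmt"

// propagation stands in for the CRI pb.MountPropagation values.
type propagation int

const (
	propagationPrivate propagation = iota
	propagationHostToContainer
	propagationBidirectional
)

func propagationOption(p propagation) string {
	switch p {
	case propagationBidirectional:
		// requires the host source itself to be a shared mount (ensureShared)
		return "rshared"
	case propagationHostToContainer:
		// requires the source to be shared or a slave (ensureSharedOrSlave)
		return "rslave"
	default:
		// PRIVATE and anything unknown fall back to runc's default
		return "rprivate"
	}
}

func main() {
	fmt.Println(propagationOption(propagationBidirectional)) // rshared
	fmt.Println(propagationOption(propagationPrivate))       // rprivate
}
```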
+ optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + return nil + } + } + + return fmt.Errorf("path %q is mounted on %q but it is not a shared mount", path, sourceMount) +} + +// Ensure mount point on which path is mounted, is either shared or slave. +func ensureSharedOrSlave(path string, mountInfos []*dockermounts.Info) error { + sourceMount, optionalOpts, err := getSourceMount(path, mountInfos) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + return nil + } else if strings.HasPrefix(opt, "master:") { + return nil + } + } + return fmt.Errorf("path %q is mounted on %q but it is not a shared or slave mount", path, sourceMount) +} + +func getMountInfo(mountInfos []*dockermounts.Info, dir string) *dockermounts.Info { + for _, m := range mountInfos { + if m.Mountpoint == dir { + return m + } + } + return nil +} + +func getSourceMount(source string, mountInfos []*dockermounts.Info) (string, string, error) { + mountinfo := getMountInfo(mountInfos, source) + if mountinfo != nil { + return source, mountinfo.Optional, nil + } + + path := source + for { + path = filepath.Dir(path) + mountinfo = getMountInfo(mountInfos, path) + if mountinfo != nil { + return path, mountinfo.Optional, nil + } + + if path == "/" { + break + } + } + + // If we are here, we did not find parent mount. Something is wrong. + return "", "", fmt.Errorf("Could not find source mount of %s", source) +} + +func addImageVolumes(rootfs string, s *Server, containerInfo *storage.ContainerInfo, specgen *generate.Generator, mountLabel string) ([]rspec.Mount, error) { + mounts := []rspec.Mount{} for dest := range containerInfo.Config.Config.Volumes { fp, err := symlink.FollowSymlinkInScope(filepath.Join(rootfs, dest), rootfs) if err != nil { - return err + return nil, err } switch s.config.ImageVolumes { - case libkpod.ImageVolumesMkdir: + case lib.ImageVolumesMkdir: if err1 := os.MkdirAll(fp, 0644); err1 != nil { - return err1 + return nil, err1 } - case libkpod.ImageVolumesBind: + case lib.ImageVolumesBind: volumeDirName := stringid.GenerateNonCryptoID() src := filepath.Join(containerInfo.RunDir, "mounts", volumeDirName) if err1 := os.MkdirAll(src, 0644); err1 != nil { - return err1 + return nil, err1 } // Label the source with the sandbox selinux mount label if mountLabel != "" { if err1 := label.Relabel(src, mountLabel, true); err1 != nil && err1 != unix.ENOTSUP { - return fmt.Errorf("relabel failed %s: %v", src, err1) + return nil, fmt.Errorf("relabel failed %s: %v", src, err1) } } logrus.Debugf("Adding bind mounted volume: %s to %s", src, dest) - specgen.AddBindMount(src, dest, []string{"rw"}) - case libkpod.ImageVolumesIgnore: + mounts = append(mounts, rspec.Mount{ + Source: src, + Destination: dest, + Options: []string{"rw"}, + }) + + case lib.ImageVolumesIgnore: logrus.Debugf("Ignoring volume %v", dest) default: logrus.Fatalf("Unrecognized image volumes setting") } } - return nil + return mounts, nil } // resolveSymbolicLink resolves a possbile symlink path. 
If the path is a symlink, returns resolved @@ -140,7 +281,7 @@ func resolveSymbolicLink(path string) (string, error) { func addDevices(sb *sandbox.Sandbox, containerConfig *pb.ContainerConfig, specgen *generate.Generator) error { sp := specgen.Spec() - if containerConfig.GetLinux().GetSecurityContext().Privileged { + if containerConfig.GetLinux().GetSecurityContext().GetPrivileged() { hostDevices, err := devices.HostDevices() if err != nil { return err @@ -282,18 +423,21 @@ func buildOCIProcessArgs(containerKubeConfig *pb.ContainerConfig, imageOCIConfig } // addOCIHook look for hooks programs installed in hooksDirPath and add them to spec -func addOCIHook(specgen *generate.Generator, hook libkpod.HookParams) error { +func addOCIHook(specgen *generate.Generator, hook lib.HookParams) error { logrus.Debugf("AddOCIHook", hook) for _, stage := range hook.Stage { + h := rspec.Hook{ + Path: hook.Hook, + Args: append([]string{hook.Hook}, hook.Arguments...), + Env: []string{fmt.Sprintf("stage=%s", stage)}, + } switch stage { case "prestart": - specgen.AddPreStartHook(hook.Hook, []string{hook.Hook, "prestart"}) - + specgen.AddPreStartHook(h) case "poststart": - specgen.AddPostStartHook(hook.Hook, []string{hook.Hook, "poststart"}) - + specgen.AddPostStartHook(h) case "poststop": - specgen.AddPostStopHook(hook.Hook, []string{hook.Hook, "poststop"}) + specgen.AddPostStopHook(h) } } return nil @@ -304,11 +448,11 @@ func setupContainerUser(specgen *generate.Generator, rootfs string, sc *pb.Linux if sc != nil { containerUser := "" // Case 1: run as user is set by kubelet - if sc.RunAsUser != nil { + if sc.GetRunAsUser() != nil { containerUser = strconv.FormatInt(sc.GetRunAsUser().Value, 10) } else { // Case 2: run as username is set by kubelet - userName := sc.RunAsUsername + userName := sc.GetRunAsUsername() if userName != "" { containerUser = userName } else { @@ -338,7 +482,7 @@ func setupContainerUser(specgen *generate.Generator, rootfs string, sc *pb.Linux } // Add groups from CRI - groups := sc.SupplementalGroups + groups := sc.GetSupplementalGroups() for _, group := range groups { specgen.AddProcessAdditionalGid(uint32(group)) } @@ -346,6 +490,110 @@ func setupContainerUser(specgen *generate.Generator, rootfs string, sc *pb.Linux return nil } +// setupCapabilities sets process.capabilities in the OCI runtime config. +func setupCapabilities(specgen *generate.Generator, capabilities *pb.Capability) error { + if capabilities == nil { + return nil + } + + toCAPPrefixed := func(cap string) string { + if !strings.HasPrefix(strings.ToLower(cap), "cap_") { + return "CAP_" + strings.ToUpper(cap) + } + return cap + } + + // Add/drop all capabilities if "all" is specified, so that + // following individual add/drop could still work. E.g. + // AddCapabilities: []string{"ALL"}, DropCapabilities: []string{"CHOWN"} + // will be all capabilities without `CAP_CHOWN`. 
+ // see https://github.com/kubernetes/kubernetes/issues/51980 + if inStringSlice(capabilities.GetAddCapabilities(), "ALL") { + for _, c := range getOCICapabilitiesList() { + if err := specgen.AddProcessCapabilityAmbient(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityBounding(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityEffective(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityInheritable(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityPermitted(c); err != nil { + return err + } + } + } + if inStringSlice(capabilities.GetDropCapabilities(), "ALL") { + for _, c := range getOCICapabilitiesList() { + if err := specgen.DropProcessCapabilityAmbient(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityBounding(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityEffective(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityInheritable(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityPermitted(c); err != nil { + return err + } + } + } + + for _, cap := range capabilities.GetAddCapabilities() { + if strings.ToUpper(cap) == "ALL" { + continue + } + capPrefixed := toCAPPrefixed(cap) + if err := specgen.AddProcessCapabilityAmbient(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityBounding(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityEffective(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityInheritable(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityPermitted(capPrefixed); err != nil { + return err + } + } + + for _, cap := range capabilities.GetDropCapabilities() { + if strings.ToUpper(cap) == "ALL" { + continue + } + capPrefixed := toCAPPrefixed(cap) + if err := specgen.DropProcessCapabilityAmbient(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityBounding(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityEffective(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityInheritable(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityPermitted(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + } + + return nil +} + func hostNetwork(containerConfig *pb.ContainerConfig) bool { securityContext := containerConfig.GetLinux().GetSecurityContext() if securityContext == nil || securityContext.GetNamespaceOptions() == nil { @@ -378,8 +626,23 @@ func ensureSaneLogPath(logPath string) error { return nil } +// addSecretsBindMounts mounts user defined secrets to the container +func addSecretsBindMounts(mountLabel, ctrRunDir string, defaultMounts []string, specgen generate.Generator) ([]rspec.Mount, error) { + containerMounts := specgen.Spec().Mounts + mounts, err := secretMounts(defaultMounts, mountLabel, ctrRunDir, containerMounts) + if err != nil { + return nil, err + } + return mounts, nil +} + // CreateContainer creates a new container in specified PodSandbox func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err 
error) { + const operation = "create_container" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("CreateContainerRequest %+v", req) s.updateLock.RLock() @@ -406,7 +669,11 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig is nil") } - name := containerConfig.GetMetadata().Name + if containerConfig.GetMetadata() == nil { + return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Metadata is nil") + } + + name := containerConfig.GetMetadata().GetName() if name == "" { return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Name is empty") } @@ -459,7 +726,7 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq func (s *Server) setupOCIHooks(specgen *generate.Generator, sb *sandbox.Sandbox, containerConfig *pb.ContainerConfig, command string) error { mounts := containerConfig.GetMounts() addedHooks := map[string]struct{}{} - addHook := func(hook libkpod.HookParams) error { + addHook := func(hook lib.HookParams) error { // Only add a hook once if _, ok := addedHooks[hook.Hook]; !ok { if err := addOCIHook(specgen, hook); err != nil { @@ -519,7 +786,31 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, specgen.HostSpecific = true specgen.ClearProcessRlimits() - containerVolumes, err := addOCIBindMounts(sb, containerConfig, &specgen) + var readOnlyRootfs bool + var privileged bool + if containerConfig.GetLinux().GetSecurityContext() != nil { + if containerConfig.GetLinux().GetSecurityContext().Privileged { + privileged = true + } + + if containerConfig.GetLinux().GetSecurityContext().ReadonlyRootfs { + readOnlyRootfs = true + specgen.SetRootReadonly(true) + } + } + + mountLabel := sb.MountLabel() + processLabel := sb.ProcessLabel() + selinuxConfig := containerConfig.GetLinux().GetSecurityContext().GetSelinuxOptions() + if selinuxConfig != nil { + var err error + processLabel, mountLabel, err = getSELinuxLabels(selinuxConfig, privileged) + if err != nil { + return nil, err + } + } + + containerVolumes, ociMounts, err := addOCIBindMounts(mountLabel, containerConfig, &specgen) if err != nil { return nil, err } @@ -530,8 +821,14 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } specgen.AddAnnotation(annotations.Volumes, string(volumesJSON)) + mnt := rspec.Mount{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"}, + } // Add cgroup mount so container process can introspect its own limits - specgen.AddCgroupsMount("ro") + specgen.AddMount(mnt) if err := addDevices(sb, containerConfig, &specgen); err != nil { return nil, err @@ -539,6 +836,10 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, labels := containerConfig.GetLabels() + if err := validateLabels(labels); err != nil { + return nil, err + } + metadata := containerConfig.GetMetadata() kubeAnnotations := containerConfig.GetAnnotations() @@ -553,22 +854,10 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } } - var readOnlyRootfs bool - var privileged bool - if containerConfig.GetLinux().GetSecurityContext() != nil { - if containerConfig.GetLinux().GetSecurityContext().Privileged { - privileged = true - } - - if containerConfig.GetLinux().GetSecurityContext().ReadonlyRootfs { - readOnlyRootfs = true - specgen.SetRootReadonly(true) - } 
- } - // set this container's apparmor profile if it is set by sandbox if s.appArmorEnabled && !privileged { - appArmorProfileName := s.getAppArmorProfileName(sb.Annotations(), metadata.Name) + + appArmorProfileName := s.getAppArmorProfileName(containerConfig.GetLinux().GetSecurityContext().GetApparmorProfile()) if appArmorProfileName != "" { // reload default apparmor profile if it is unloaded. if s.appArmorProfile == apparmor.DefaultApparmorProfile { @@ -579,6 +868,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, specgen.SetProcessApparmorProfile(appArmorProfileName) } + } logPath := containerConfig.LogPath @@ -612,28 +902,13 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, if linux != nil { resources := linux.GetResources() if resources != nil { - cpuPeriod := resources.CpuPeriod - if cpuPeriod != 0 { - specgen.SetLinuxResourcesCPUPeriod(uint64(cpuPeriod)) - } - - cpuQuota := resources.CpuQuota - if cpuQuota != 0 { - specgen.SetLinuxResourcesCPUQuota(cpuQuota) - } - - cpuShares := resources.CpuShares - if cpuShares != 0 { - specgen.SetLinuxResourcesCPUShares(uint64(cpuShares)) - } - - memoryLimit := resources.MemoryLimitInBytes - if memoryLimit != 0 { - specgen.SetLinuxResourcesMemoryLimit(memoryLimit) - } - - oomScoreAdj := resources.OomScoreAdj - specgen.SetProcessOOMScoreAdj(int(oomScoreAdj)) + specgen.SetLinuxResourcesCPUPeriod(uint64(resources.GetCpuPeriod())) + specgen.SetLinuxResourcesCPUQuota(resources.GetCpuQuota()) + specgen.SetLinuxResourcesCPUShares(uint64(resources.GetCpuShares())) + specgen.SetLinuxResourcesMemoryLimit(resources.GetMemoryLimitInBytes()) + specgen.SetProcessOOMScoreAdj(int(resources.GetOomScoreAdj())) + specgen.SetLinuxResourcesCPUCpus(resources.GetCpusetCpus()) + specgen.SetLinuxResourcesCPUMems(resources.GetCpusetMems()) } var cgPath string @@ -652,61 +927,18 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } specgen.SetLinuxCgroupsPath(cgPath) - capabilities := linux.GetSecurityContext().GetCapabilities() if privileged { - // this is setting correct capabilities as well for privileged mode specgen.SetupPrivileged(true) + setOCIBindMountsPrivileged(&specgen) } else { - toCAPPrefixed := func(cap string) string { - if !strings.HasPrefix(strings.ToLower(cap), "cap_") { - return "CAP_" + strings.ToUpper(cap) - } - return cap + err = setupCapabilities(&specgen, linux.GetSecurityContext().GetCapabilities()) + if err != nil { + return nil, err } - - // Add/drop all capabilities if "all" is specified, so that - // following individual add/drop could still work. E.g. - // AddCapabilities: []string{"ALL"}, DropCapabilities: []string{"CHOWN"} - // will be all capabilities without `CAP_CHOWN`. 
- // see https://github.com/kubernetes/kubernetes/issues/51980 - if inStringSlice(capabilities.GetAddCapabilities(), "ALL") { - for _, c := range getOCICapabilitiesList() { - if err := specgen.AddProcessCapability(c); err != nil { - return nil, err - } - } - } - if inStringSlice(capabilities.GetDropCapabilities(), "ALL") { - for _, c := range getOCICapabilitiesList() { - if err := specgen.DropProcessCapability(c); err != nil { - return nil, err - } - } - } - - if capabilities != nil { - for _, cap := range capabilities.GetAddCapabilities() { - if strings.ToUpper(cap) == "ALL" { - continue - } - if err := specgen.AddProcessCapability(toCAPPrefixed(cap)); err != nil { - return nil, err - } - } - - for _, cap := range capabilities.GetDropCapabilities() { - if strings.ToUpper(cap) == "ALL" { - continue - } - if err := specgen.DropProcessCapability(toCAPPrefixed(cap)); err != nil { - return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) - } - } - } - specgen.SetProcessSelinuxLabel(sb.ProcessLabel()) } - - specgen.SetLinuxMountLabel(sb.MountLabel()) + specgen.SetProcessSelinuxLabel(processLabel) + specgen.SetLinuxMountLabel(mountLabel) + specgen.SetProcessNoNewPrivileges(linux.GetSecurityContext().GetNoNewPrivs()) if containerConfig.GetLinux().GetSecurityContext() != nil && !containerConfig.GetLinux().GetSecurityContext().Privileged { @@ -716,6 +948,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, "/proc/timer_list", "/proc/timer_stats", "/proc/sched_debug", + "/proc/scsi", "/sys/firmware", } { specgen.AddLinuxMaskedPaths(mp) @@ -739,10 +972,26 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, logrus.Debugf("pod container state %+v", podInfraState) ipcNsPath := fmt.Sprintf("/proc/%d/ns/ipc", podInfraState.Pid) - if err := specgen.AddOrReplaceLinuxNamespace("ipc", ipcNsPath); err != nil { + if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.IPCNamespace), ipcNsPath); err != nil { return nil, err } + utsNsPath := fmt.Sprintf("/proc/%d/ns/uts", podInfraState.Pid) + if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.UTSNamespace), utsNsPath); err != nil { + return nil, err + } + + if containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostPid() { + // kubernetes PodSpec specify to use Host PID namespace + specgen.RemoveLinuxNamespace(string(rspec.PIDNamespace)) + } else if s.config.EnableSharedPIDNamespace { + // share Pod PID namespace + pidNsPath := fmt.Sprintf("/proc/%d/ns/pid", podInfraState.Pid) + if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.PIDNamespace), pidNsPath); err != nil { + return nil, err + } + } + netNsPath := sb.NetNsPath() if netNsPath == "" { // The sandbox does not have a permanent namespace, @@ -750,7 +999,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, netNsPath = fmt.Sprintf("/proc/%d/ns/net", podInfraState.Pid) } - if err := specgen.AddOrReplaceLinuxNamespace("network", netNsPath); err != nil { + if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.NetworkNamespace), netNsPath); err != nil { return nil, err } @@ -765,71 +1014,85 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } images, err := s.StorageImageServer().ResolveNames(image) if err != nil { - // This means we got an image ID - if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") { + if err == storage.ErrCannotParseImageID { images = append(images, image) } else { return nil, 
err
}
}
- image = images[0]
- // Get imageName and imageRef that are requested in container status
- imageName := image
- status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), image)
+ // Get imageName and imageRef that are later requested in container status
+ status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), images[0])
 if err != nil {
 return nil, err
 }
-
+ imageName := status.Name
 imageRef := status.ID
- //
- // TODO: https://github.com/kubernetes-incubator/cri-o/issues/531
- //
- //for _, n := range status.Names {
- //r, err := reference.ParseNormalizedNamed(n)
- //if err != nil {
- //return nil, fmt.Errorf("failed to normalize image name for ImageRef: %v", err)
- //}
- //if digested, isDigested := r.(reference.Canonical); isDigested {
- //imageRef = reference.FamiliarString(digested)
- //break
- //}
- //}
- for _, n := range status.Names {
- r, err := reference.ParseNormalizedNamed(n)
- if err != nil {
- return nil, fmt.Errorf("failed to normalize image name for Image: %v", err)
- }
- if tagged, isTagged := r.(reference.Tagged); isTagged {
- imageName = reference.FamiliarString(tagged)
- break
- }
+ if len(status.RepoDigests) > 0 {
+ imageRef = status.RepoDigests[0]
 }
+ specgen.AddAnnotation(annotations.Image, image)
 specgen.AddAnnotation(annotations.ImageName, imageName)
 specgen.AddAnnotation(annotations.ImageRef, imageRef)
 specgen.AddAnnotation(annotations.IP, sb.IP())
+ mnt = rspec.Mount{
+ Type: "bind",
+ Source: sb.ShmPath(),
+ Destination: "/dev/shm",
+ Options: []string{"rw", "bind"},
+ }
 // bind mount the pod shm
- specgen.AddBindMount(sb.ShmPath(), "/dev/shm", []string{"rw"})
+ specgen.AddMount(mnt)
 options := []string{"rw"}
 if readOnlyRootfs {
 options = []string{"ro"}
 }
 if sb.ResolvPath() != "" {
+ if err := label.Relabel(sb.ResolvPath(), mountLabel, true); err != nil && err != unix.ENOTSUP {
+ return nil, err
+ }
+
+ mnt = rspec.Mount{
+ Type: "bind",
+ Source: sb.ResolvPath(),
+ Destination: "/etc/resolv.conf",
+ Options: append(options, "bind"),
+ }
 // bind mount the pod resolver file
- specgen.AddBindMount(sb.ResolvPath(), "/etc/resolv.conf", options)
+ specgen.AddMount(mnt)
+ }
+
+ if sb.HostnamePath() != "" {
+ if err := label.Relabel(sb.HostnamePath(), mountLabel, true); err != nil && err != unix.ENOTSUP {
+ return nil, err
+ }
+
+ mnt = rspec.Mount{
+ Type: "bind",
+ Source: sb.HostnamePath(),
+ Destination: "/etc/hostname",
+ Options: append(options, "bind"),
+ }
+ specgen.AddMount(mnt)
 }
 // Bind mount /etc/hosts for host networking containers
 if hostNetwork(containerConfig) {
- specgen.AddBindMount("/etc/hosts", "/etc/hosts", options)
+ mnt = rspec.Mount{
+ Type: "bind",
+ Source: "/etc/hosts",
+ Destination: "/etc/hosts",
+ Options: append(options, "bind"),
+ }
+ specgen.AddMount(mnt)
 }
- if sb.Hostname() != "" {
- specgen.SetHostname(sb.Hostname())
- }
+ // Set hostname and add env for hostname
+ specgen.SetHostname(sb.Hostname())
+ specgen.AddProcessEnv("HOSTNAME", sb.Hostname())
 specgen.AddAnnotation(annotations.Name, containerName)
 specgen.AddAnnotation(annotations.ContainerID, containerID)
@@ -840,7 +1103,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
 specgen.AddAnnotation(annotations.TTY, fmt.Sprintf("%v", containerConfig.Tty))
 specgen.AddAnnotation(annotations.Stdin, fmt.Sprintf("%v", containerConfig.Stdin))
 specgen.AddAnnotation(annotations.StdinOnce, fmt.Sprintf("%v", containerConfig.StdinOnce))
- specgen.AddAnnotation(annotations.Image, image)
+
specgen.AddAnnotation(annotations.ResolvPath, sb.InfraContainer().CrioAnnotations()[annotations.ResolvPath]) created := time.Now() specgen.AddAnnotation(annotations.Created, created.Format(time.RFC3339Nano)) @@ -863,25 +1126,35 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } specgen.AddAnnotation(annotations.Annotations, string(kubeAnnotationsJSON)) + spp := containerConfig.GetLinux().GetSecurityContext().GetSeccompProfilePath() if !privileged { - if err = s.setupSeccomp(&specgen, containerName, sb.Annotations()); err != nil { + if err = s.setupSeccomp(&specgen, spp); err != nil { return nil, err } } + specgen.AddAnnotation(annotations.SeccompProfilePath, spp) metaname := metadata.Name attempt := metadata.Attempt containerInfo, err := s.StorageRuntimeServer().CreateContainer(s.ImageContext(), sb.Name(), sb.ID(), - image, image, + image, status.ID, containerName, containerID, metaname, attempt, - sb.MountLabel(), + mountLabel, nil) if err != nil { return nil, err } + defer func() { + if err != nil { + err2 := s.StorageRuntimeServer().DeleteContainer(containerInfo.ID) + if err2 != nil { + logrus.Warnf("Failed to cleanup container directory: %v", err2) + } + } + }() mountPoint, err := s.StorageRuntimeServer().StartContainer(containerID) if err != nil { @@ -891,7 +1164,8 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, containerImageConfig := containerInfo.Config if containerImageConfig == nil { - return nil, fmt.Errorf("empty image config for %s", image) + err = fmt.Errorf("empty image config for %s", image) + return nil, err } if containerImageConfig.Config.StopSignal != "" { @@ -900,7 +1174,8 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } // Add image volumes - if err := addImageVolumes(mountPoint, s, &containerInfo, &specgen, sb.MountLabel()); err != nil { + volumeMounts, err := addImageVolumes(mountPoint, s, &containerInfo, &specgen, mountLabel) + if err != nil { return nil, err } @@ -910,30 +1185,10 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } specgen.SetProcessArgs(processArgs) - // Add environment variables from CRI and image config - envs := containerConfig.GetEnvs() - if envs != nil { - for _, item := range envs { - key := item.Key - value := item.Value - if key == "" { - continue - } - specgen.AddProcessEnv(key, value) - } - } - if containerImageConfig != nil { - for _, item := range containerImageConfig.Config.Env { - parts := strings.SplitN(item, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid env from image: %s", item) - } - - if parts[0] == "" { - continue - } - specgen.AddProcessEnv(parts[0], parts[1]) - } + envs := mergeEnvs(containerImageConfig, containerConfig.GetEnvs()) + for _, e := range envs { + parts := strings.SplitN(e, "=", 2) + specgen.AddProcessEnv(parts[0], parts[1]) } // Set working directory @@ -950,6 +1205,38 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, containerCwd = runtimeCwd } specgen.SetProcessCwd(containerCwd) + if err := setupWorkingDirectory(mountPoint, mountLabel, containerCwd); err != nil { + if err1 := s.StorageRuntimeServer().StopContainer(containerID); err1 != nil { + return nil, fmt.Errorf("can't umount container after cwd error %v: %v", err, err1) + } + return nil, err + } + + var secretMounts []rspec.Mount + if len(s.config.DefaultMounts) > 0 { + var err error + secretMounts, err = addSecretsBindMounts(mountLabel, containerInfo.RunDir, 
s.config.DefaultMounts, specgen) + if err != nil { + return nil, fmt.Errorf("failed to mount secrets: %v", err) + } + } + + mounts := []rspec.Mount{} + mounts = append(mounts, ociMounts...) + mounts = append(mounts, volumeMounts...) + mounts = append(mounts, secretMounts...) + + sort.Sort(orderedMounts(mounts)) + + for _, m := range mounts { + mnt = rspec.Mount{ + Type: "bind", + Source: m.Source, + Destination: m.Destination, + Options: append(m.Options, "bind"), + } + specgen.AddMount(mnt) + } if err := s.setupOCIHooks(&specgen, sb, containerConfig, processArgs[0]); err != nil { return nil, err @@ -979,11 +1266,15 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, return nil, err } - container, err := oci.NewContainer(containerID, containerName, containerInfo.RunDir, logPath, sb.NetNs(), labels, kubeAnnotations, image, imageName, imageRef, metadata, sb.ID(), containerConfig.Tty, containerConfig.Stdin, containerConfig.StdinOnce, sb.Privileged(), sb.Trusted(), containerInfo.Dir, created, containerImageConfig.Config.StopSignal) + crioAnnotations := specgen.Spec().Annotations + + container, err := oci.NewContainer(containerID, containerName, containerInfo.RunDir, logPath, sb.NetNs(), labels, crioAnnotations, kubeAnnotations, image, imageName, imageRef, metadata, sb.ID(), containerConfig.Tty, containerConfig.Stdin, containerConfig.StdinOnce, sb.Privileged(), sb.Trusted(), containerInfo.Dir, created, containerImageConfig.Config.StopSignal) if err != nil { return nil, err } + container.SetSpec(specgen.Spec()) container.SetMountPoint(mountPoint) + container.SetSeccompProfilePath(spp) for _, cv := range containerVolumes { container.AddVolume(cv) @@ -992,14 +1283,11 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, return container, nil } -func (s *Server) setupSeccomp(specgen *generate.Generator, cname string, sbAnnotations map[string]string) error { - profile, ok := sbAnnotations["security.alpha.kubernetes.io/seccomp/container/"+cname] - if !ok { - profile, ok = sbAnnotations["security.alpha.kubernetes.io/seccomp/pod"] - if !ok { - // running w/o seccomp, aka unconfined - profile = seccompUnconfined - } +func (s *Server) setupSeccomp(specgen *generate.Generator, profile string) error { + if profile == "" { + // running w/o seccomp, aka unconfined + specgen.Spec().Linux.Seccomp = nil + return nil } if !s.seccompEnabled { if profile != seccompUnconfined { @@ -1012,25 +1300,22 @@ func (s *Server) setupSeccomp(specgen *generate.Generator, cname string, sbAnnot specgen.Spec().Linux.Seccomp = nil return nil } - if profile == seccompRuntimeDefault { + if profile == seccompRuntimeDefault || profile == seccompDockerDefault { return seccomp.LoadProfileFromStruct(s.seccompProfile, specgen) } if !strings.HasPrefix(profile, seccompLocalhostPrefix) { return fmt.Errorf("unknown seccomp profile option: %q", profile) } - //file, err := ioutil.ReadFile(filepath.Join(s.seccompProfileRoot, strings.TrimPrefix(profile, seccompLocalhostPrefix))) - //if err != nil { - //return err - //} - // TODO(runcom): setup from provided node's seccomp profile - // can't do this yet, see https://issues.k8s.io/36997 - return nil + fname := strings.TrimPrefix(profile, "localhost/") + file, err := ioutil.ReadFile(filepath.FromSlash(fname)) + if err != nil { + return fmt.Errorf("cannot load seccomp profile %q: %v", fname, err) + } + return seccomp.LoadProfileFromBytes(file, specgen) } // getAppArmorProfileName gets the profile name for the given container. 
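// As a quick reference, the seccomp dispatch above resolves the CRI
// profile string as follows (the seccomp* constant values are assumed
// to match their usual Kubernetes definitions):
//
//   ""                  -> unconfined; the spec's Linux.Seccomp is set to nil
//   "unconfined"        -> unconfined
//   "runtime/default"   -> the server's default seccomp profile
//   "docker/default"    -> the server's default seccomp profile
//   "localhost/<path>"  -> profile JSON read from <path> on the node
//   anything else       -> "unknown seccomp profile option" error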
-func (s *Server) getAppArmorProfileName(annotations map[string]string, ctrName string) string { - profile := apparmor.GetProfileNameFromPodAnnotations(annotations, ctrName) - +func (s *Server) getAppArmorProfileName(profile string) string { if profile == "" { return "" } @@ -1085,3 +1370,44 @@ func getUserInfo(rootfs string, userName string) (uint32, uint32, []uint32, erro return uid, gid, additionalGids, nil } + +func setOCIBindMountsPrivileged(g *generate.Generator) { + spec := g.Spec() + // clear readonly for /sys and cgroup + for i, m := range spec.Mounts { + if spec.Mounts[i].Destination == "/sys" && !spec.Root.Readonly { + clearReadOnly(&spec.Mounts[i]) + } + if m.Type == "cgroup" { + clearReadOnly(&spec.Mounts[i]) + } + } + spec.Linux.ReadonlyPaths = nil + spec.Linux.MaskedPaths = nil +} + +func clearReadOnly(m *rspec.Mount) { + var opt []string + for _, o := range m.Options { + if o != "ro" { + opt = append(opt, o) + } + } + m.Options = opt +} + +func setupWorkingDirectory(rootfs, mountLabel, containerCwd string) error { + fp, err := symlink.FollowSymlinkInScope(filepath.Join(rootfs, containerCwd), rootfs) + if err != nil { + return err + } + if err := os.MkdirAll(fp, 0755); err != nil { + return err + } + if mountLabel != "" { + if err1 := label.Relabel(fp, mountLabel, true); err1 != nil && err1 != unix.ENOTSUP { + return fmt.Errorf("relabel failed %s: %v", fp, err1) + } + } + return nil +} diff --git a/server/container_exec.go b/server/container_exec.go index 0cdb9579..3bb37749 100644 --- a/server/container_exec.go +++ b/server/container_exec.go @@ -5,6 +5,7 @@ import ( "io" "os" "os/exec" + "time" "github.com/docker/docker/pkg/pools" "github.com/kubernetes-incubator/cri-o/oci" @@ -13,15 +14,21 @@ import ( "k8s.io/client-go/tools/remotecommand" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - utilexec "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/term" + utilexec "k8s.io/utils/exec" ) // Exec prepares a streaming endpoint to execute a command in the container. -func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecResponse, error) { +func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (resp *pb.ExecResponse, err error) { + const operation = "exec" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("ExecRequest %+v", req) - resp, err := s.GetExec(req) + resp, err = s.GetExec(req) if err != nil { return nil, fmt.Errorf("unable to prepare exec endpoint") } @@ -46,12 +53,15 @@ func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader, return fmt.Errorf("container is not created or running") } - args := []string{"exec"} - if tty { - args = append(args, "-t") + processFile, err := oci.PrepareProcessExec(c, cmd, tty) + if err != nil { + return err } + defer os.RemoveAll(processFile.Name()) + + args := []string{"exec"} + args = append(args, "--process", processFile.Name()) args = append(args, c.ID()) - args = append(args, cmd...) execCmd := exec.Command(ss.runtimeServer.Runtime().Path(c), args...) 
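// With the --process flow above, the exec parameters that
// oci.PrepareProcessExec serializes (command, arguments, terminal mode)
// reach the OCI runtime as a process.json file instead of positional
// arguments, i.e. roughly the following invocation (path illustrative):
//
//   runc exec --process /run/crio/exec/process.json <container-id>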
var cmdErr error if tty { diff --git a/server/container_execsync.go b/server/container_execsync.go index 35f7896c..4d7b6718 100644 --- a/server/container_execsync.go +++ b/server/container_execsync.go @@ -2,6 +2,7 @@ package server import ( "fmt" + "time" "github.com/kubernetes-incubator/cri-o/oci" "github.com/sirupsen/logrus" @@ -10,7 +11,12 @@ import ( ) // ExecSync runs a command in a container synchronously. -func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.ExecSyncResponse, error) { +func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (resp *pb.ExecSyncResponse, err error) { + const operation = "exec_sync" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("ExecSyncRequest %+v", req) c, err := s.GetContainerFromRequest(req.ContainerId) if err != nil { @@ -35,7 +41,7 @@ func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.Exe if err != nil { return nil, err } - resp := &pb.ExecSyncResponse{ + resp = &pb.ExecSyncResponse{ Stdout: execResp.Stdout, Stderr: execResp.Stderr, ExitCode: execResp.ExitCode, diff --git a/server/container_list.go b/server/container_list.go index 995b7e1b..060fa2af 100644 --- a/server/container_list.go +++ b/server/container_list.go @@ -1,6 +1,8 @@ package server import ( + "time" + "github.com/kubernetes-incubator/cri-o/oci" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -27,41 +29,53 @@ func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool { } // ListContainers lists all containers by filters. -func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) { +func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (resp *pb.ListContainersResponse, err error) { + const operation = "list_containers" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("ListContainersRequest %+v", req) + var ctrs []*pb.Container - filter := req.Filter + filter := req.GetFilter() ctrList, err := s.ContainerServer.ListContainers() if err != nil { return nil, err } - // Filter using container id and pod id first. - if filter.Id != "" { - id, err := s.CtrIDIndex().Get(filter.Id) - if err != nil { - return nil, err - } - c := s.ContainerServer.GetContainer(id) - if c != nil { - if filter.PodSandboxId != "" { - if c.Sandbox() == filter.PodSandboxId { - ctrList = []*oci.Container{c} - } else { - ctrList = []*oci.Container{} - } + if filter != nil { - } else { - ctrList = []*oci.Container{c} + // Filter using container id and pod id first. + if filter.Id != "" { + id, err := s.CtrIDIndex().Get(filter.Id) + if err != nil { + // If we don't find a container ID with a filter, it should not + // be considered an error. 
Log a warning and return an empty struct.
+ logrus.Warnf("unable to find container ID %s", filter.Id)
+ return &pb.ListContainersResponse{}, nil
 }
- c := s.ContainerServer.GetContainer(id)
- if c != nil {
- if filter.PodSandboxId != "" {
- if c.Sandbox() == filter.PodSandboxId {
- ctrList = []*oci.Container{c}
- } else {
- ctrList = []*oci.Container{}
- }
+ c := s.ContainerServer.GetContainer(id)
+ if c != nil {
+ if filter.PodSandboxId != "" {
+ if c.Sandbox() == filter.PodSandboxId {
+ ctrList = []*oci.Container{c}
+ } else {
+ ctrList = []*oci.Container{}
+ }
+
+ } else {
+ ctrList = []*oci.Container{c}
+ }
+ }
+ } else {
+ if filter.PodSandboxId != "" {
+ pod := s.ContainerServer.GetSandbox(filter.PodSandboxId)
+ if pod == nil {
+ ctrList = []*oci.Container{}
+ } else {
+ ctrList = pod.Containers().List()
+ }
 }
 }
 }
@@ -83,6 +97,7 @@
 Metadata: ctr.Metadata(),
 Annotations: ctr.Annotations(),
 Image: img,
+ ImageRef: ctr.ImageRef(),
 }
 switch cState.Status {
@@ -101,7 +116,7 @@
 }
 }
- resp := &pb.ListContainersResponse{
+ resp = &pb.ListContainersResponse{
 Containers: ctrs,
 }
 logrus.Debugf("ListContainersResponse: %+v", resp)
diff --git a/server/container_portforward.go b/server/container_portforward.go
index 97dd5342..38d33bea 100644
--- a/server/container_portforward.go
+++ b/server/container_portforward.go
@@ -6,6 +6,7 @@ import (
 "io"
 "os/exec"
 "strings"
+ "time"
 "github.com/docker/docker/pkg/pools"
 "github.com/kubernetes-incubator/cri-o/oci"
@@ -15,11 +16,15 @@
 )
 // PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
-func (s *Server) PortForward(ctx context.Context, req *pb.PortForwardRequest) (*pb.PortForwardResponse, error) {
+func (s *Server) PortForward(ctx context.Context, req *pb.PortForwardRequest) (resp *pb.PortForwardResponse, err error) {
+ const operation = "port_forward"
+ defer func() {
+ recordOperation(operation, time.Now())
+ recordError(operation, err)
+ }()
 logrus.Debugf("PortForwardRequest %+v", req)
- resp, err := s.GetPortForward(req)
-
+ resp, err = s.GetPortForward(req)
 if err != nil {
 return nil, fmt.Errorf("unable to prepare portforward endpoint")
 }
diff --git a/server/container_remove.go b/server/container_remove.go
index cedfc602..d29e9fb2 100644
--- a/server/container_remove.go
+++ b/server/container_remove.go
@@ -1,6 +1,8 @@
 package server
 import (
+ "time"
+
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
 pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 // RemoveContainer removes the container. If the container is running, the container
 // should be force removed.
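// Every CRI handler in this patch now follows the same instrumentation
// pattern: name the operation, then defer recordOperation/recordError
// over the named error return value. The new server/metrics/metrics.go
// implementing the helpers is not shown in full here; a minimal sketch
// with matching signatures, assuming Prometheus-style collectors
// (metric variable names hypothetical), would be:
//
//	func recordOperation(operation string, start time.Time) {
//		// count the call and observe its latency, labeled by operation
//		crioOperations.WithLabelValues(operation).Inc()
//		crioOperationsLatency.WithLabelValues(operation).Observe(time.Since(start).Seconds())
//	}
//
//	func recordError(operation string, err error) {
//		if err != nil {
//			// count failures per operation
//			crioOperationsErrors.WithLabelValues(operation).Inc()
//		}
//	}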
-func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) { - _, err := s.ContainerServer.Remove(req.ContainerId, true) +func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (resp *pb.RemoveContainerResponse, err error) { + const operation = "remove_container" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("RemoveContainerRequest: %+v", req) + + _, err = s.ContainerServer.Remove(ctx, req.ContainerId, true) if err != nil { return nil, err } - resp := &pb.RemoveContainerResponse{} + resp = &pb.RemoveContainerResponse{} logrus.Debugf("RemoveContainerResponse: %+v", resp) return resp, nil } diff --git a/server/container_start.go b/server/container_start.go index 85be0948..b4dd222f 100644 --- a/server/container_start.go +++ b/server/container_start.go @@ -2,6 +2,7 @@ package server import ( "fmt" + "time" "github.com/kubernetes-incubator/cri-o/oci" "github.com/sirupsen/logrus" @@ -10,7 +11,12 @@ import ( ) // StartContainer starts the container. -func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) { +func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (resp *pb.StartContainerResponse, err error) { + const operation = "start_container" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("StartContainerRequest %+v", req) c, err := s.GetContainerFromRequest(req.ContainerId) if err != nil { @@ -37,7 +43,7 @@ func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerReque return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err) } - resp := &pb.StartContainerResponse{} + resp = &pb.StartContainerResponse{} logrus.Debugf("StartContainerResponse %+v", resp) return resp, nil } diff --git a/server/container_stats.go b/server/container_stats.go index 22b87c45..17df31ad 100644 --- a/server/container_stats.go +++ b/server/container_stats.go @@ -2,6 +2,7 @@ package server import ( "fmt" + "time" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" @@ -9,6 +10,11 @@ import ( // ContainerStats returns stats of the container. If the container does not // exist, the call returns an error. -func (s *Server) ContainerStats(ctx context.Context, req *pb.ContainerStatsRequest) (*pb.ContainerStatsResponse, error) { +func (s *Server) ContainerStats(ctx context.Context, req *pb.ContainerStatsRequest) (resp *pb.ContainerStatsResponse, err error) { + const operation = "container_stats" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() return nil, fmt.Errorf("not implemented") } diff --git a/server/container_stats_list.go b/server/container_stats_list.go index 92922099..2c564714 100644 --- a/server/container_stats_list.go +++ b/server/container_stats_list.go @@ -2,12 +2,18 @@ package server import ( "fmt" + "time" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // ListContainerStats returns stats of all running containers. 
-func (s *Server) ListContainerStats(ctx context.Context, req *pb.ListContainerStatsRequest) (*pb.ListContainerStatsResponse, error) { +func (s *Server) ListContainerStats(ctx context.Context, req *pb.ListContainerStatsRequest) (resp *pb.ListContainerStatsResponse, err error) { + const operation = "list_container_stats" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() return nil, fmt.Errorf("not implemented") } diff --git a/server/container_status.go b/server/container_status.go index b4684c9c..3b84468f 100644 --- a/server/container_status.go +++ b/server/container_status.go @@ -1,6 +1,9 @@ package server import ( + "time" + + "github.com/containers/image/types" "github.com/kubernetes-incubator/cri-o/oci" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -14,7 +17,12 @@ const ( ) // ContainerStatus returns status of the container. -func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) { +func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (resp *pb.ContainerStatusResponse, err error) { + const operation = "container_status" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("ContainerStatusRequest %+v", req) c, err := s.GetContainerFromRequest(req.ContainerId) if err != nil { @@ -22,7 +30,7 @@ func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusReq } containerID := c.ID() - resp := &pb.ContainerStatusResponse{ + resp = &pb.ContainerStatusResponse{ Status: &pb.ContainerStatus{ Id: containerID, Metadata: c.Metadata(), @@ -31,7 +39,10 @@ func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusReq ImageRef: c.ImageRef(), }, } - resp.Status.Image = &pb.ImageSpec{Image: c.ImageName()} + resp.Status.Image = &pb.ImageSpec{Image: c.Image()} + if status, err := s.StorageImageServer().ImageStatus(&types.SystemContext{}, c.ImageRef()); err == nil { + resp.Status.Image.Image = status.Name + } mounts := []*pb.Mount{} for _, cv := range c.Volumes() { diff --git a/server/container_stop.go b/server/container_stop.go index c0093cfd..6846f90d 100644 --- a/server/container_stop.go +++ b/server/container_stop.go @@ -1,19 +1,28 @@ package server import ( + "time" + "github.com/sirupsen/logrus" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // StopContainer stops a running container with a grace period (i.e., timeout). 
-func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) {
- _, err := s.ContainerServer.ContainerStop(req.ContainerId, req.Timeout)
+func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (resp *pb.StopContainerResponse, err error) {
+ const operation = "stop_container"
+ defer func() {
+ recordOperation(operation, time.Now())
+ recordError(operation, err)
+ }()
+ logrus.Debugf("StopContainerRequest %+v", req)
+
+ _, err = s.ContainerServer.ContainerStop(ctx, req.ContainerId, req.Timeout)
 if err != nil {
 return nil, err
 }
- resp := &pb.StopContainerResponse{}
+ resp = &pb.StopContainerResponse{}
 logrus.Debugf("StopContainerResponse %s: %+v", req.ContainerId, resp)
 return resp, nil
 }
diff --git a/server/container_update_resources.go b/server/container_update_resources.go
new file mode 100644
index 00000000..c58f3484
--- /dev/null
+++ b/server/container_update_resources.go
@@ -0,0 +1,55 @@
+package server
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+ "github.com/kubernetes-incubator/cri-o/oci"
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/net/context"
+ pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
+)
+
+// UpdateContainerResources updates ContainerConfig of the container.
+func (s *Server) UpdateContainerResources(ctx context.Context, req *pb.UpdateContainerResourcesRequest) (resp *pb.UpdateContainerResourcesResponse, err error) {
+ const operation = "update_container_resources"
+ defer func() {
+ recordOperation(operation, time.Now())
+ recordError(operation, err)
+ }()
+ logrus.Debugf("UpdateContainerResources %+v", req)
+
+ c, err := s.GetContainerFromRequest(req.GetContainerId())
+ if err != nil {
+ return nil, err
+ }
+ state := s.Runtime().ContainerStatus(c)
+ if !(state.Status == oci.ContainerStateRunning || state.Status == oci.ContainerStateCreated) {
+ return nil, fmt.Errorf("container %s is not in running or created state: %s", c.ID(), state.Status)
+ }
+
+ resources := toOCIResources(req.GetLinux())
+ if err := s.Runtime().UpdateContainer(c, resources); err != nil {
+ return nil, err
+ }
+ return &pb.UpdateContainerResourcesResponse{}, nil
+}
+
+// toOCIResources converts CRI resource constraints to OCI.
+func toOCIResources(r *pb.LinuxContainerResources) *rspec.LinuxResources {
+ return &rspec.LinuxResources{
+ CPU: &rspec.LinuxCPU{
+ Shares: proto.Uint64(uint64(r.GetCpuShares())),
+ Quota: proto.Int64(r.GetCpuQuota()),
+ Period: proto.Uint64(uint64(r.GetCpuPeriod())),
+ Cpus: r.GetCpusetCpus(),
+ Mems: r.GetCpusetMems(),
+ },
+ Memory: &rspec.LinuxMemory{
+ Limit: proto.Int64(r.GetMemoryLimitInBytes()),
+ },
+ // TODO(runcom): OOMScoreAdj is missing
+ }
+}
diff --git a/server/container_updateruntimeconfig.go b/server/container_updateruntimeconfig.go
index b900c9b1..b976fc67 100644
--- a/server/container_updateruntimeconfig.go
+++ b/server/container_updateruntimeconfig.go
@@ -1,11 +1,19 @@
 package server
 import (
+ "time"
+
 "golang.org/x/net/context"
 pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 // UpdateRuntimeConfig updates the runtime configuration (e.g. the pod CIDR).
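// A worked example of the CRI-to-OCI conversion above (request values
// invented for illustration): an UpdateContainerResources call carrying
//
//	&pb.LinuxContainerResources{
//		CpuShares:          512,
//		CpuQuota:           50000,
//		CpuPeriod:          100000,
//		MemoryLimitInBytes: 256 * 1024 * 1024, // 268435456
//	}
//
// is turned by toOCIResources into an rspec.LinuxResources with
// CPU.Shares=512, CPU.Quota=50000, CPU.Period=100000 and
// Memory.Limit=268435456, which s.Runtime().UpdateContainer then passes
// on to the OCI runtime (the equivalent of `runc update`).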
-func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeConfigRequest) (*pb.UpdateRuntimeConfigResponse, error) { +func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeConfigRequest) (resp *pb.UpdateRuntimeConfigResponse, err error) { + const operation = "update_runtime_config" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + return &pb.UpdateRuntimeConfigResponse{}, nil } diff --git a/server/fixtures/crio.conf b/server/fixtures/crio.conf new file mode 100644 index 00000000..d901990a --- /dev/null +++ b/server/fixtures/crio.conf @@ -0,0 +1,41 @@ +[crio] +root = "/var/lib/containers/storage" +runroot = "/var/run/containers/storage" +storage_driver = "overlay" +storage_option = ["overlay.override_kernel_check=1"] + +[crio.api] +listen = "/var/run/crio.sock" +stream_address = "localhost" +stream_port = "10010" + +[crio.runtime] +runtime = "/usr/local/bin/runc" +runtime_untrusted_workload = "untrusted" +default_workload_trust = "trusted" +conmon = "/usr/local/libexec/crio/conmon" +conmon_env = [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", +] +selinux = true +seccomp_profile = "/etc/crio/seccomp.json" +apparmor_profile = "crio-default" +cgroup_manager = "cgroupfs" +pids_limit = 1024 + +[crio.image] +default_transport = "docker://" +pause_image = "kubernetes/pause" +pause_command = "/pause" +signature_policy = "/tmp" +image_volumes = "mkdir" +insecure_registries = [ + "insecure-registry:1234", +] +registries = [ + "registry:4321", +] + +[crio.network] +network_dir = "/etc/cni/net.d/" +plugin_dir = "/opt/cni/bin/" diff --git a/server/fixtures/resolv.conf b/server/fixtures/resolv.conf new file mode 100644 index 00000000..19fa6252 --- /dev/null +++ b/server/fixtures/resolv.conf @@ -0,0 +1,4 @@ +search 192.30.253.113 192.30.252.153 +nameserver cri-o.io +nameserver github.com +options timeout:5 attempts:3 diff --git a/server/fixtures/secret/testDataA b/server/fixtures/secret/testDataA new file mode 100644 index 00000000..ec9068d4 --- /dev/null +++ b/server/fixtures/secret/testDataA @@ -0,0 +1 @@ +secretDataA \ No newline at end of file diff --git a/server/fixtures/secret/testDataB b/server/fixtures/secret/testDataB new file mode 100644 index 00000000..3ff8ea8f --- /dev/null +++ b/server/fixtures/secret/testDataB @@ -0,0 +1 @@ +secretDataB \ No newline at end of file diff --git a/server/image_fs_info.go b/server/image_fs_info.go index 969bdc34..bfa297a7 100644 --- a/server/image_fs_info.go +++ b/server/image_fs_info.go @@ -2,12 +2,19 @@ package server import ( "fmt" + "time" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // ImageFsInfo returns information of the filesystem that is used to store images. 
-func (s *Server) ImageFsInfo(ctx context.Context, req *pb.ImageFsInfoRequest) (*pb.ImageFsInfoResponse, error) { +func (s *Server) ImageFsInfo(ctx context.Context, req *pb.ImageFsInfoRequest) (resp *pb.ImageFsInfoResponse, err error) { + const operation = "image_fs_info" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + return nil, fmt.Errorf("not implemented") } diff --git a/server/image_list.go b/server/image_list.go index ebcc6f6a..bcdc1036 100644 --- a/server/image_list.go +++ b/server/image_list.go @@ -1,13 +1,21 @@ package server import ( + "time" + "github.com/sirupsen/logrus" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // ListImages lists existing images. -func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) { +func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (resp *pb.ListImagesResponse, err error) { + const operation = "list_images" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("ListImagesRequest: %+v", req) filter := "" reqFilter := req.GetFilter() @@ -21,21 +29,23 @@ func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb if err != nil { return nil, err } - response := pb.ListImagesResponse{} + resp = &pb.ListImagesResponse{} for _, result := range results { if result.Size != nil { - response.Images = append(response.Images, &pb.Image{ - Id: result.ID, - RepoTags: result.Names, - Size_: *result.Size, + resp.Images = append(resp.Images, &pb.Image{ + Id: result.ID, + RepoTags: result.RepoTags, + RepoDigests: result.RepoDigests, + Size_: *result.Size, }) } else { - response.Images = append(response.Images, &pb.Image{ - Id: result.ID, - RepoTags: result.Names, + resp.Images = append(resp.Images, &pb.Image{ + Id: result.ID, + RepoTags: result.RepoTags, + RepoDigests: result.RepoDigests, }) } } - logrus.Debugf("ListImagesResponse: %+v", response) - return &response, nil + logrus.Debugf("ListImagesResponse: %+v", resp) + return resp, nil } diff --git a/server/image_pull.go b/server/image_pull.go index 26d08912..67dfc469 100644 --- a/server/image_pull.go +++ b/server/image_pull.go @@ -3,16 +3,24 @@ package server import ( "encoding/base64" "strings" + "time" "github.com/containers/image/copy" "github.com/containers/image/types" + "github.com/kubernetes-incubator/cri-o/pkg/storage" "github.com/sirupsen/logrus" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // PullImage pulls a image with authentication config. -func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) { +func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (resp *pb.PullImageResponse, err error) { + const operation = "pull_image" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("PullImageRequest: %+v", req) // TODO: what else do we need here? 
(Signatures when the story isn't just pulling from docker://) image := "" @@ -24,7 +32,6 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P var ( images []string pulled string - err error ) images, err = s.StorageImageServer().ResolveNames(image) if err != nil { @@ -67,11 +74,23 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P } // let's be smart, docker doesn't repull if image already exists. - _, err = s.StorageImageServer().ImageStatus(s.ImageContext(), img) + var storedImage *storage.ImageResult + storedImage, err = s.StorageImageServer().ImageStatus(s.ImageContext(), img) if err == nil { - logrus.Debugf("image %s already in store, skipping pull", img) - pulled = img - break + tmpImg, err := s.StorageImageServer().PrepareImage(s.ImageContext(), img, options) + if err == nil { + tmpImgConfigDigest := tmpImg.ConfigInfo().Digest + if tmpImgConfigDigest.String() == "" { + // this means we are playing with a schema1 image, in which + // case, we're going to repull the image in any case + logrus.Debugf("image config digest is empty, re-pulling image") + } else if tmpImgConfigDigest.String() == storedImage.ConfigDigest.String() { + logrus.Debugf("image %s already in store, skipping pull", img) + pulled = img + break + } + } + logrus.Debugf("image in store has different ID, re-pulling %s", img) } _, err = s.StorageImageServer().PullImage(s.ImageContext(), img, options) @@ -85,8 +104,16 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P if pulled == "" && err != nil { return nil, err } - resp := &pb.PullImageResponse{ - ImageRef: pulled, + status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), pulled) + if err != nil { + return nil, err + } + imageRef := status.ID + if len(status.RepoDigests) > 0 { + imageRef = status.RepoDigests[0] + } + resp = &pb.PullImageResponse{ + ImageRef: imageRef, } logrus.Debugf("PullImageResponse: %+v", resp) return resp, nil diff --git a/server/image_remove.go b/server/image_remove.go index 32ca4066..d1f1e884 100644 --- a/server/image_remove.go +++ b/server/image_remove.go @@ -2,15 +2,22 @@ package server import ( "fmt" - "strings" + "time" + "github.com/kubernetes-incubator/cri-o/pkg/storage" "github.com/sirupsen/logrus" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // RemoveImage removes the image. 
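// Summing up the PullImage logic above: a pull is skipped only when the
// image is already in the store and its config digest matches the one
// the registry would deliver; schema1 images, whose config digest is
// empty, are always re-pulled. As a predicate (illustrative sketch, not
// a helper that exists in this patch):
//
//	func shouldSkipPull(stored *storage.ImageResult, remoteConfigDigest string) bool {
//		if remoteConfigDigest == "" {
//			return false // schema1 image: nothing to compare, re-pull
//		}
//		return stored != nil && remoteConfigDigest == stored.ConfigDigest.String()
//	}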
-func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) { +func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (resp *pb.RemoveImageResponse, err error) { + const operation = "remove_image" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("RemoveImageRequest: %+v", req) image := "" img := req.GetImage() @@ -22,20 +29,18 @@ func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (* } var ( images []string - err error deleted bool ) images, err = s.StorageImageServer().ResolveNames(image) if err != nil { - // This means we got an image ID - if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") { + if err == storage.ErrCannotParseImageID { images = append(images, image) } else { return nil, err } } for _, img := range images { - err = s.StorageImageServer().RemoveImage(s.ImageContext(), img) + err = s.StorageImageServer().UntagImage(s.ImageContext(), img) if err != nil { logrus.Debugf("error deleting image %s: %v", img, err) continue @@ -46,7 +51,7 @@ func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (* if !deleted && err != nil { return nil, err } - resp := &pb.RemoveImageResponse{} + resp = &pb.RemoveImageResponse{} logrus.Debugf("RemoveImageResponse: %+v", resp) return resp, nil } diff --git a/server/image_status.go b/server/image_status.go index 1e362a43..4e2e6a0e 100644 --- a/server/image_status.go +++ b/server/image_status.go @@ -2,9 +2,10 @@ package server import ( "fmt" - "strings" + "time" "github.com/containers/storage" + pkgstorage "github.com/kubernetes-incubator/cri-o/pkg/storage" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -12,7 +13,13 @@ import ( ) // ImageStatus returns the status of the image. 
-func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) { +func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (resp *pb.ImageStatusResponse, err error) { + const operation = "image_status" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("ImageStatusRequest: %+v", req) image := "" img := req.GetImage() @@ -24,8 +31,7 @@ func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (* } images, err := s.StorageImageServer().ResolveNames(image) if err != nil { - // This means we got an image ID - if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") { + if err == pkgstorage.ErrCannotParseImageID { images = append(images, image) } else { return nil, err @@ -40,12 +46,12 @@ func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (* } return nil, err } - resp := &pb.ImageStatusResponse{ + resp = &pb.ImageStatusResponse{ Image: &pb.Image{ - Id: status.ID, - RepoTags: status.Names, - Size_: *status.Size, - // TODO: https://github.com/kubernetes-incubator/cri-o/issues/531 + Id: status.ID, + RepoTags: status.RepoTags, + RepoDigests: status.RepoDigests, + Size_: *status.Size, }, } logrus.Debugf("ImageStatusResponse: %+v", resp) diff --git a/server/inspect.go b/server/inspect.go index f65b4903..d1fe6abe 100644 --- a/server/inspect.go +++ b/server/inspect.go @@ -2,31 +2,71 @@ package server import ( "encoding/json" + "errors" "fmt" "net/http" + cimage "github.com/containers/image/types" "github.com/go-zoo/bone" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" + "github.com/kubernetes-incubator/cri-o/oci" + "github.com/kubernetes-incubator/cri-o/types" + "github.com/sirupsen/logrus" ) -// ContainerInfo stores information about containers -type ContainerInfo struct { - Name string `json:"name"` - Pid int `json:"pid"` - Image string `json:"image"` - CreatedTime int64 `json:"created_time"` - Labels map[string]string `json:"labels"` - Annotations map[string]string `json:"annotations"` - LogPath string `json:"log_path"` - Root string `json:"root"` - Sandbox string `json:"sandbox"` - IP string `json:"ip_address"` +func (s *Server) getInfo() types.CrioInfo { + return types.CrioInfo{ + StorageDriver: s.config.Config.Storage, + StorageRoot: s.config.Config.Root, + CgroupDriver: s.config.Config.CgroupManager, + } } -// CrioInfo stores information about the crio daemon -type CrioInfo struct { - StorageDriver string `json:"storage_driver"` - StorageRoot string `json:"storage_root"` - CgroupDriver string `json:"cgroup_driver"` +var ( + errCtrNotFound = errors.New("container not found") + errCtrStateNil = errors.New("container state is nil") + errSandboxNotFound = errors.New("sandbox for container not found") +) + +func (s *Server) getContainerInfo(id string, getContainerFunc func(id string) *oci.Container, getInfraContainerFunc func(id string) *oci.Container, getSandboxFunc func(id string) *sandbox.Sandbox) (types.ContainerInfo, error) { + ctr := getContainerFunc(id) + if ctr == nil { + ctr = getInfraContainerFunc(id) + if ctr == nil { + return types.ContainerInfo{}, errCtrNotFound + } + } + // TODO(mrunalp): should we call UpdateStatus()? 
+ ctrState := ctr.State() + if ctrState == nil { + return types.ContainerInfo{}, errCtrStateNil + } + sb := getSandboxFunc(ctr.Sandbox()) + if sb == nil { + logrus.Debugf("can't find sandbox %s for container %s", ctr.Sandbox(), id) + return types.ContainerInfo{}, errSandboxNotFound + } + image := ctr.Image() + if s.ContainerServer != nil && s.ContainerServer.StorageImageServer() != nil { + if status, err := s.ContainerServer.StorageImageServer().ImageStatus(&cimage.SystemContext{}, ctr.ImageRef()); err == nil { + image = status.Name + } + } + return types.ContainerInfo{ + Name: ctr.Name(), + Pid: ctrState.Pid, + Image: image, + ImageRef: ctr.ImageRef(), + CreatedTime: ctrState.Created.UnixNano(), + Labels: ctr.Labels(), + Annotations: ctr.Annotations(), + CrioAnnotations: ctr.CrioAnnotations(), + Root: ctr.MountPoint(), + LogPath: ctr.LogPath(), + Sandbox: ctr.Sandbox(), + IP: sb.IP(), + }, nil + } // GetInfoMux returns the mux used to serve info requests @@ -34,11 +74,7 @@ func (s *Server) GetInfoMux() *bone.Mux { mux := bone.New() mux.Get("/info", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - ci := CrioInfo{ - StorageDriver: s.config.Config.Storage, - StorageRoot: s.config.Config.Root, - CgroupDriver: s.config.Config.CgroupManager, - } + ci := s.getInfo() js, err := json.Marshal(ci) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -50,36 +86,20 @@ func (s *Server) GetInfoMux() *bone.Mux { mux.Get("/containers/:id", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { containerID := bone.GetValue(req, "id") - ctr := s.GetContainer(containerID) - if ctr == nil { - ctr = s.getInfraContainer(containerID) - if ctr == nil { - http.Error(w, fmt.Sprintf("container with id: %s not found", containerID), http.StatusNotFound) - return + ci, err := s.getContainerInfo(containerID, s.GetContainer, s.getInfraContainer, s.getSandbox) + if err != nil { + switch err { + case errCtrNotFound: + http.Error(w, fmt.Sprintf("can't find the container with id %s", containerID), http.StatusNotFound) + case errCtrStateNil: + http.Error(w, fmt.Sprintf("can't find container state for container with id %s", containerID), http.StatusInternalServerError) + case errSandboxNotFound: + http.Error(w, fmt.Sprintf("can't find the sandbox for container id %s", containerID), http.StatusNotFound) + default: + http.Error(w, err.Error(), http.StatusInternalServerError) } - } - ctrState := ctr.State() - if ctrState == nil { - http.Error(w, fmt.Sprintf("container %s state is nil", containerID), http.StatusNotFound) return } - sb := s.getSandbox(ctr.Sandbox()) - if sb == nil { - http.Error(w, fmt.Sprintf("can't find the sandbox for container id, sandbox id %s: %s", containerID, ctr.Sandbox()), http.StatusNotFound) - return - } - ci := ContainerInfo{ - Name: ctr.Name(), - Pid: ctrState.Pid, - Image: ctr.Image(), - CreatedTime: ctrState.Created.UnixNano(), - Labels: ctr.Labels(), - Annotations: ctr.Annotations(), - Root: ctr.MountPoint(), - LogPath: ctr.LogPath(), - Sandbox: ctr.Sandbox(), - IP: sb.IP(), - } js, err := json.Marshal(ci) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/server/inspect_test.go b/server/inspect_test.go new file mode 100644 index 00000000..7246ef86 --- /dev/null +++ b/server/inspect_test.go @@ -0,0 +1,238 @@ +package server + +import ( + "testing" + "time" + + "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" + + "github.com/containernetworking/plugins/pkg/ns" + 
"github.com/kubernetes-incubator/cri-o/lib" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" + "github.com/kubernetes-incubator/cri-o/oci" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func TestGetInfo(t *testing.T) { + c := lib.DefaultConfig() + c.RootConfig.Storage = "afoobarstorage" + c.RootConfig.Root = "afoobarroot" + c.RuntimeConfig.CgroupManager = "systemd" + apiConfig := APIConfig{} + s := &Server{ + config: Config{*c, apiConfig}, + } + ci := s.getInfo() + if ci.CgroupDriver != "systemd" { + t.Fatalf("expected 'systemd', got %q", ci.CgroupDriver) + } + if ci.StorageDriver != "afoobarstorage" { + t.Fatalf("expected 'afoobarstorage', got %q", ci.StorageDriver) + } + if ci.StorageRoot != "afoobarroot" { + t.Fatalf("expected 'afoobarroot', got %q", ci.StorageRoot) + } +} + +type mockNetNS struct { +} + +func (ns mockNetNS) Close() error { + return nil +} +func (ns mockNetNS) Fd() uintptr { + ptr := new(uintptr) + return *ptr +} +func (ns mockNetNS) Do(toRun func(ns.NetNS) error) error { + return nil +} +func (ns mockNetNS) Set() error { + return nil +} +func (ns mockNetNS) Path() string { + return "" +} + +func TestGetContainerInfo(t *testing.T) { + s := &Server{} + created := time.Now() + labels := map[string]string{ + "io.kubernetes.container.name": "POD", + "io.kubernetes.test2": "value2", + "io.kubernetes.test3": "value3", + } + annotations := map[string]string{ + "io.kubernetes.test": "value", + "io.kubernetes.test1": "value1", + } + getContainerFunc := func(id string) *oci.Container { + container, err := oci.NewContainer("testid", "testname", "", "/container/logs", mockNetNS{}, labels, annotations, annotations, "image", "imageName", "imageRef", &runtime.ContainerMetadata{}, "testsandboxid", false, false, false, false, false, "/root/for/container", created, "SIGKILL") + if err != nil { + t.Fatal(err) + } + container.SetMountPoint("/var/foo/container") + cstate := &oci.ContainerState{} + cstate.State = specs.State{ + Pid: 42, + } + cstate.Created = created + container.SetState(cstate) + return container + } + getInfraContainerFunc := func(id string) *oci.Container { + return nil + } + getSandboxFunc := func(id string) *sandbox.Sandbox { + s := &sandbox.Sandbox{} + s.AddIP("1.1.1.42") + return s + } + ci, err := s.getContainerInfo("", getContainerFunc, getInfraContainerFunc, getSandboxFunc) + if err != nil { + t.Fatal(err) + } + if ci.CreatedTime != created.UnixNano() { + t.Fatalf("expected same created time %d, got %d", created.UnixNano(), ci.CreatedTime) + } + if ci.Pid != 42 { + t.Fatalf("expected pid 42, got %v", ci.Pid) + } + if ci.Name != "testname" { + t.Fatalf("expected name testname, got %s", ci.Name) + } + if ci.Image != "image" { + t.Fatalf("expected image name image, got %s", ci.Image) + } + if ci.ImageRef != "imageRef" { + t.Fatalf("expected image ref imageRef, got %s", ci.ImageRef) + } + if ci.Root != "/var/foo/container" { + t.Fatalf("expected root to be /var/foo/container, got %s", ci.Root) + } + if ci.LogPath != "/container/logs" { + t.Fatalf("expected log path to be /containers/logs, got %s", ci.LogPath) + } + if ci.Sandbox != "testsandboxid" { + t.Fatalf("expected sandbox to be testsandboxid, got %s", ci.Sandbox) + } + if ci.IP != "1.1.1.42" { + t.Fatalf("expected ip 1.1.1.42, got %s", ci.IP) + } + if len(ci.Annotations) == 0 { + t.Fatal("annotations are empty") + } + if len(ci.Labels) == 0 { + t.Fatal("labels are empty") + } + if len(ci.Annotations) != len(annotations) { + t.Fatalf("container info annotations len (%d) isn't the same as 
original annotations len (%d)", len(ci.Annotations), len(annotations)) + } + if len(ci.Labels) != len(labels) { + t.Fatalf("container info labels len (%d) isn't the same as original labels len (%d)", len(ci.Labels), len(labels)) + } + var found bool + for k, v := range annotations { + found = false + for key, value := range ci.Annotations { + if k == key && v == value { + found = true + break + } + } + if !found { + t.Fatalf("key %s with value %v wasn't in container info annotations", k, v) + } + } + for k, v := range labels { + found = false + for key, value := range ci.Labels { + if k == key && v == value { + found = true + break + } + } + if !found { + t.Fatalf("key %s with value %v wasn't in container info labels", k, v) + } + } +} + +func TestGetContainerInfoCtrNotFound(t *testing.T) { + s := &Server{} + getContainerFunc := func(id string) *oci.Container { + return nil + } + getInfraContainerFunc := func(id string) *oci.Container { + return nil + } + getSandboxFunc := func(id string) *sandbox.Sandbox { + return nil + } + _, err := s.getContainerInfo("", getContainerFunc, getInfraContainerFunc, getSandboxFunc) + if err == nil { + t.Fatal("expected an error but got nothing") + } + if err != errCtrNotFound { + t.Fatalf("expected errCtrNotFound error, got %v", err) + } +} + +func TestGetContainerInfoCtrStateNil(t *testing.T) { + s := &Server{} + created := time.Now() + labels := map[string]string{} + annotations := map[string]string{} + getContainerFunc := func(id string) *oci.Container { + container, err := oci.NewContainer("testid", "testname", "", "/container/logs", mockNetNS{}, labels, annotations, annotations, "imageName", "imageName", "imageRef", &runtime.ContainerMetadata{}, "testsandboxid", false, false, false, false, false, "/root/for/container", created, "SIGKILL") + if err != nil { + t.Fatal(err) + } + container.SetMountPoint("/var/foo/container") + container.SetState(nil) + return container + } + getInfraContainerFunc := func(id string) *oci.Container { + return nil + } + getSandboxFunc := func(id string) *sandbox.Sandbox { + s := &sandbox.Sandbox{} + s.AddIP("1.1.1.42") + return s + } + _, err := s.getContainerInfo("", getContainerFunc, getInfraContainerFunc, getSandboxFunc) + if err == nil { + t.Fatal("expected an error but got nothing") + } + if err != errCtrStateNil { + t.Fatalf("expected errCtrStateNil error, got %v", err) + } +} + +func TestGetContainerInfoSandboxNotFound(t *testing.T) { + s := &Server{} + created := time.Now() + labels := map[string]string{} + annotations := map[string]string{} + getContainerFunc := func(id string) *oci.Container { + container, err := oci.NewContainer("testid", "testname", "", "/container/logs", mockNetNS{}, labels, annotations, annotations, "imageName", "imageName", "imageRef", &runtime.ContainerMetadata{}, "testsandboxid", false, false, false, false, false, "/root/for/container", created, "SIGKILL") + if err != nil { + t.Fatal(err) + } + container.SetMountPoint("/var/foo/container") + return container + } + getInfraContainerFunc := func(id string) *oci.Container { + return nil + } + getSandboxFunc := func(id string) *sandbox.Sandbox { + return nil + } + _, err := s.getContainerInfo("", getContainerFunc, getInfraContainerFunc, getSandboxFunc) + if err == nil { + t.Fatal("expected an error but got nothing") + } + if err != errSandboxNotFound { + t.Fatalf("expected errSandboxNotFound error, got %v", err) + } +} diff --git a/server/metrics/metrics.go b/server/metrics/metrics.go new file mode 100644 index 00000000..b0527bcc --- /dev/null 
+++ b/server/metrics/metrics.go @@ -0,0 +1,70 @@ +package metrics + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + // CRIOOperationsKey is the key for CRI-O operation metrics. + CRIOOperationsKey = "crio_operations" + // CRIOOperationsLatencyKey is the key for the operation latency metrics. + CRIOOperationsLatencyKey = "crio_operations_latency_microseconds" + // CRIOOperationsErrorsKey is the key for the operation error metrics. + CRIOOperationsErrorsKey = "crio_operations_errors" + + // TODO(runcom): + // timeouts + + subsystem = "container_runtime" +) + +var ( + // CRIOOperations collects operation counts by operation type. + CRIOOperations = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: subsystem, + Name: CRIOOperationsKey, + Help: "Cumulative number of CRI-O operations by operation type.", + }, + []string{"operation_type"}, + ) + // CRIOOperationsLatency collects operation latency numbers by operation + // type. + CRIOOperationsLatency = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Subsystem: subsystem, + Name: CRIOOperationsLatencyKey, + Help: "Latency in microseconds of CRI-O operations. Broken down by operation type.", + }, + []string{"operation_type"}, + ) + // CRIOOperationsErrors collects operation errors by operation + // type. + CRIOOperationsErrors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: subsystem, + Name: CRIOOperationsErrorsKey, + Help: "Cumulative number of CRI-O operation errors by operation type.", + }, + []string{"operation_type"}, + ) +) + +var registerMetrics sync.Once + +// Register all metrics +func Register() { + registerMetrics.Do(func() { + prometheus.MustRegister(CRIOOperations) + prometheus.MustRegister(CRIOOperationsLatency) + prometheus.MustRegister(CRIOOperationsErrors) + }) +} + +// SinceInMicroseconds gets the time since the specified start in microseconds. 
+func SinceInMicroseconds(start time.Time) float64 {
+	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
+}
diff --git a/server/runtime_status.go b/server/runtime_status.go
index 67fc87b6..5632fab3 100644
--- a/server/runtime_status.go
+++ b/server/runtime_status.go
@@ -1,12 +1,19 @@
 package server
 
 import (
+	"time"
+
 	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // Status returns the status of the runtime
-func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) {
+func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (resp *pb.StatusResponse, err error) {
+	const operation = "status"
+	defer func() {
+		recordOperation(operation, time.Now())
+		recordError(operation, err)
+	}()
 
 	// Deal with Runtime conditions
 	runtimeReady, err := s.Runtime().RuntimeReady()
@@ -22,7 +29,7 @@ func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusR
 	runtimeReadyConditionString := pb.RuntimeReady
 	networkReadyConditionString := pb.NetworkReady
 
-	resp := &pb.StatusResponse{
+	resp = &pb.StatusResponse{
 		Status: &pb.RuntimeStatus{
 			Conditions: []*pb.RuntimeCondition{
 				{
diff --git a/server/sandbox_list.go b/server/sandbox_list.go
index e3cac025..4d629b88 100644
--- a/server/sandbox_list.go
+++ b/server/sandbox_list.go
@@ -1,7 +1,9 @@
 package server
 
 import (
-	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
+	"time"
+
+	"github.com/kubernetes-incubator/cri-o/lib/sandbox"
 	"github.com/kubernetes-incubator/cri-o/oci"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
@@ -28,7 +30,13 @@ func filterSandbox(p *pb.PodSandbox, filter *pb.PodSandboxFilter) bool {
 }
 
 // ListPodSandbox returns a list of SandBoxes.
-func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
+func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (resp *pb.ListPodSandboxResponse, err error) {
+	const operation = "list_pod_sandbox"
+	defer func() {
+		recordOperation(operation, time.Now())
+		recordError(operation, err)
+	}()
+
	logrus.Debugf("ListPodSandboxRequest %+v", req)
 	var pods []*pb.PodSandbox
 	var podList []*sandbox.Sandbox
@@ -42,7 +50,11 @@ func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxReque
 	if filter.Id != "" {
 		id, err := s.PodIDIndex().Get(filter.Id)
 		if err != nil {
-			return nil, err
+			// Not finding an ID in a filtered list should not be considered
+			// an error; the pod may already have been deleted after it was
+			// stopped. Log and return an empty response.
+			logrus.Warnf("unable to find pod %s with filter", filter.Id)
+			return &pb.ListPodSandboxResponse{}, nil
 		}
 		sb := s.getSandbox(id)
 		if sb == nil {
@@ -82,7 +94,7 @@ func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxReque
 		}
 	}
 
-	resp := &pb.ListPodSandboxResponse{
+	resp = &pb.ListPodSandboxResponse{
 		Items: pods,
 	}
 	logrus.Debugf("ListPodSandboxResponse %+v", resp)
diff --git a/server/sandbox_network.go b/server/sandbox_network.go
new file mode 100644
index 00000000..9b054bdc
--- /dev/null
+++ b/server/sandbox_network.go
@@ -0,0 +1,70 @@
+package server
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/kubernetes-incubator/cri-o/lib/sandbox"
+	"github.com/sirupsen/logrus"
+	"k8s.io/kubernetes/pkg/kubelet/network/hostport"
+)
+
+// networkStart sets up the sandbox's network and returns the pod IP on
+// success, or an error.
+func (s *Server) networkStart(hostNetwork bool, sb *sandbox.Sandbox) (string, error) {
+	if hostNetwork {
+		return s.BindAddress(), nil
+	}
+
+	podNetwork := newPodNetwork(sb)
+	err := s.netPlugin.SetUpPod(podNetwork)
+	if err != nil {
+		return "", fmt.Errorf("failed to create pod network sandbox %s(%s): %v", sb.Name(), sb.ID(), err)
+	}
+
+	var ip string
+	if ip, err = s.netPlugin.GetPodNetworkStatus(podNetwork); err != nil {
+		return "", fmt.Errorf("failed to get network status for pod sandbox %s(%s): %v", sb.Name(), sb.ID(), err)
+	}
+
+	if len(sb.PortMappings()) > 0 {
+		ip4 := net.ParseIP(ip).To4()
+		if ip4 == nil {
+			return "", fmt.Errorf("failed to get valid ipv4 address for sandbox %s(%s)", sb.Name(), sb.ID())
+		}
+
+		if err = s.hostportManager.Add(sb.ID(), &hostport.PodPortMapping{
+			Name:         sb.Name(),
+			PortMappings: sb.PortMappings(),
+			IP:           ip4,
+			HostNetwork:  false,
+		}, "lo"); err != nil {
+			return "", fmt.Errorf("failed to add hostport mapping for sandbox %s(%s): %v", sb.Name(), sb.ID(), err)
+		}
+
+	}
+	return ip, nil
+}
+
+// networkStop cleans up and removes a pod's network. It is best-effort and
+// must call the network plugin even if the network namespace is already gone.
+func (s *Server) networkStop(hostNetwork bool, sb *sandbox.Sandbox) error {
+	if !hostNetwork {
+		if err := s.hostportManager.Remove(sb.ID(), &hostport.PodPortMapping{
+			Name:         sb.Name(),
+			PortMappings: sb.PortMappings(),
+			HostNetwork:  false,
+		}); err != nil {
+			logrus.Warnf("failed to remove hostport for pod sandbox %s(%s): %v",
+				sb.Name(), sb.ID(), err)
+		}
+
+		podNetwork := newPodNetwork(sb)
+		if err := s.netPlugin.TearDownPod(podNetwork); err != nil {
+			logrus.Warnf("failed to destroy network for pod sandbox %s(%s): %v",
+				sb.Name(), sb.ID(), err)
+		}
+	}
+
+	return nil
+}
diff --git a/server/sandbox_remove.go b/server/sandbox_remove.go
index 856b8938..62b2c698 100644
--- a/server/sandbox_remove.go
+++ b/server/sandbox_remove.go
@@ -2,9 +2,10 @@ package server
 
 import (
 	"fmt"
+	"time"
 
 	"github.com/containers/storage"
-	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
+	"github.com/kubernetes-incubator/cri-o/lib/sandbox"
 	"github.com/kubernetes-incubator/cri-o/oci"
 	pkgstorage "github.com/kubernetes-incubator/cri-o/pkg/storage"
 	"github.com/pkg/errors"
@@ -15,7 +16,13 @@ import (
 
 // RemovePodSandbox deletes the sandbox. If there are any running containers in the
 // sandbox, they should be force deleted.
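`networkStart` and `networkStop` above concentrate the CNI setup/teardown pairing: setup fails hard and returns the pod IP, while teardown only logs, because it must be safe to call even when the network namespace is already gone. A compressed, self-contained sketch of that shape; the `setUpPod`/`tearDownPod` stubs stand in for the ocicni plugin calls:

```
package main

import (
	"errors"
	"fmt"
	"log"
)

// Stubs standing in for netPlugin.SetUpPod / TearDownPod.
func setUpPod(id string) (string, error) { return "10.88.0.2", nil }
func tearDownPod(id string) error        { return errors.New("netns already gone") }

// networkStart-style helper: host-network pods skip CNI and reuse the
// host address; everything else gets a pod IP or a hard error.
func networkStart(hostNetwork bool, id, bindAddress string) (string, error) {
	if hostNetwork {
		return bindAddress, nil
	}
	ip, err := setUpPod(id)
	if err != nil {
		return "", fmt.Errorf("failed to create pod network sandbox %s: %v", id, err)
	}
	return ip, nil
}

// networkStop-style helper: teardown failures are only logged, so the
// sandbox stop path keeps going even if the netns has already vanished.
func networkStop(hostNetwork bool, id string) {
	if hostNetwork {
		return
	}
	if err := tearDownPod(id); err != nil {
		log.Printf("failed to destroy network for pod sandbox %s: %v", id, err)
	}
}

func main() {
	ip, err := networkStart(false, "sandbox1", "127.0.0.1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pod IP:", ip)
	networkStop(false, "sandbox1")
}
```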
-func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) { +func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (resp *pb.RemovePodSandboxResponse, err error) { + const operation = "remove_pod_sandbox" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("RemovePodSandboxRequest %+v", req) sb, err := s.getPodSandboxFromRequest(req.PodSandboxId) if err != nil { @@ -27,7 +34,7 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR // the the CRI interface which expects to not error out in not found // cases. - resp := &pb.RemovePodSandboxResponse{} + resp = &pb.RemovePodSandboxResponse{} logrus.Warnf("could not get sandbox %s, it's probably been removed already: %v", req.PodSandboxId, err) return resp, nil } @@ -41,7 +48,7 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR if !sb.Stopped() { cState := s.Runtime().ContainerStatus(c) if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning { - if err := s.Runtime().StopContainer(c, -1); err != nil { + if err := s.Runtime().StopContainer(ctx, c, 10); err != nil { // Assume container is already stopped logrus.Warnf("failed to stop container %s: %v", c.Name(), err) } @@ -92,7 +99,7 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR return nil, fmt.Errorf("failed to delete pod sandbox %s from index: %v", sb.ID(), err) } - resp := &pb.RemovePodSandboxResponse{} + resp = &pb.RemovePodSandboxResponse{} logrus.Debugf("RemovePodSandboxResponse %+v", resp) return resp, nil } diff --git a/server/sandbox_run.go b/server/sandbox_run.go index a5fb092e..5ba007c2 100644 --- a/server/sandbox_run.go +++ b/server/sandbox_run.go @@ -3,7 +3,7 @@ package server import ( "encoding/json" "fmt" - "net" + "io/ioutil" "os" "path" "path/filepath" @@ -13,16 +13,17 @@ import ( "time" "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/libkpod/sandbox" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" "github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/pkg/annotations" + runtimespec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" "golang.org/x/sys/unix" - "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/api/core/v1" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" "k8s.io/kubernetes/pkg/kubelet/leaky" "k8s.io/kubernetes/pkg/kubelet/network/hostport" @@ -82,10 +83,7 @@ func (s *Server) runContainer(container *oci.Container, cgroupParent string) err if err := s.Runtime().CreateContainer(container, cgroupParent); err != nil { return err } - if err := s.Runtime().StartContainer(container); err != nil { - return err - } - return nil + return s.Runtime().StartContainer(container) } var ( @@ -94,19 +92,29 @@ var ( // RunPodSandbox creates and runs a pod-level sandbox. 
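Each handler in this patch gains the same prologue: named results plus a deferred closure, since only named return values (`resp`, `err`) are visible to a `defer` that runs after `return`. One caveat worth noting: `time.Now()` evaluated *inside* the deferred closure, as in the hunks above, yields the handler's end time, so the latency summary would observe near-zero durations; the sketch below captures the start at entry instead. Stub recorders, not the real metrics package:

```
package main

import (
	"errors"
	"fmt"
	"time"
)

// Stub recorders standing in for the metrics package.
func recordOperation(op string, start time.Time) {
	fmt.Printf("%s took %v\n", op, time.Since(start))
}

func recordError(op string, err error) {
	if err != nil {
		fmt.Printf("%s failed: %v\n", op, err)
	}
}

// Named results let the deferred closure observe the final err,
// whichever return path was taken. start is captured at entry so
// the recorded duration is the real handler latency.
func removePodSandbox(id string) (resp string, err error) {
	const operation = "remove_pod_sandbox"
	start := time.Now()
	defer func() {
		recordOperation(operation, start)
		recordError(operation, err)
	}()

	if id == "" {
		return "", errors.New("empty sandbox id")
	}
	time.Sleep(time.Millisecond) // pretend to do work
	return "removed " + id, nil
}

func main() {
	removePodSandbox("sandbox1")
	removePodSandbox("")
}
```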
func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest) (resp *pb.RunPodSandboxResponse, err error) { + const operation = "run_pod_sandbox" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + s.updateLock.RLock() defer s.updateLock.RUnlock() + if req.GetConfig().GetMetadata() == nil { + return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Metadata is nil") + } + logrus.Debugf("RunPodSandboxRequest %+v", req) - var processLabel, mountLabel, netNsPath, resolvPath string + var processLabel, mountLabel, resolvPath string // process req.Name - kubeName := req.GetConfig().GetMetadata().Name + kubeName := req.GetConfig().GetMetadata().GetName() if kubeName == "" { return nil, fmt.Errorf("PodSandboxConfig.Name should not be empty") } - namespace := req.GetConfig().GetMetadata().Namespace - attempt := req.GetConfig().GetMetadata().Attempt + namespace := req.GetConfig().GetMetadata().GetNamespace() + attempt := req.GetConfig().GetMetadata().GetAttempt() id, name, err := s.generatePodIDandName(req.GetConfig()) if err != nil { @@ -152,8 +160,8 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest name, id, s.config.PauseImage, "", containerName, - req.GetConfig().GetMetadata().Name, - req.GetConfig().GetMetadata().Uid, + req.GetConfig().GetMetadata().GetName(), + req.GetConfig().GetMetadata().GetUid(), namespace, attempt, nil) @@ -188,12 +196,6 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest g.SetProcessArgs([]string{s.config.PauseCommand}) } - // set hostname - hostname := req.GetConfig().Hostname - if hostname != "" { - g.SetHostname(hostname) - } - // set DNS options if req.GetConfig().GetDnsConfig() != nil { dnsServers := req.GetConfig().GetDnsConfig().Servers @@ -209,7 +211,16 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest } return nil, err } - g.AddBindMount(resolvPath, "/etc/resolv.conf", []string{"ro"}) + if err := label.Relabel(resolvPath, mountLabel, true); err != nil && err != unix.ENOTSUP { + return nil, err + } + mnt := runtimespec.Mount{ + Type: "bind", + Source: resolvPath, + Destination: "/etc/resolv.conf", + Options: []string{"ro", "bind"}, + } + g.AddMount(mnt) } // add metadata @@ -222,13 +233,20 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest // add labels labels := req.GetConfig().GetLabels() - // Add special container name label for the infra container - labels[types.KubernetesContainerNameLabel] = leaky.PodInfraContainerName - labelsJSON, err := json.Marshal(labels) - if err != nil { + if err := validateLabels(labels); err != nil { return nil, err } + // Add special container name label for the infra container + labelsJSON := []byte{} + if labels != nil { + labels[types.KubernetesContainerNameLabel] = leaky.PodInfraContainerName + labelsJSON, err = json.Marshal(labels) + if err != nil { + return nil, err + } + } + // add annotations kubeAnnotations := req.GetConfig().GetAnnotations() kubeAnnotationsJSON, err := json.Marshal(kubeAnnotations) @@ -249,19 +267,28 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest return nil, fmt.Errorf("requested logDir for sbox id %s is a relative path: %s", id, logDir) } - // Don't use SELinux separation with Host Pid or IPC Namespace, - if !req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().HostPid && !req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().HostIpc { 
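The move above from raw field access (`GetMetadata().Name`) to chained getters (`GetMetadata().GetName()`) relies on the protobuf convention that generated getters return zero values on nil receivers, which is why a single explicit nil check on the metadata suffices. A hand-rolled sketch of that convention, with illustrative stub types rather than the generated `pb` ones:

```
package main

import "fmt"

type Metadata struct {
	Name      string
	Namespace string
}

// GetName follows the protobuf-generated getter convention: it is
// safe to call on a nil receiver and returns the zero value.
func (m *Metadata) GetName() string {
	if m == nil {
		return ""
	}
	return m.Name
}

type Config struct {
	Metadata *Metadata
}

func (c *Config) GetMetadata() *Metadata {
	if c == nil {
		return nil
	}
	return c.Metadata
}

func main() {
	var c *Config // e.g. a request arrived with no config at all
	// c.Metadata.Name would panic; the getter chain cannot.
	fmt.Printf("name=%q\n", c.GetMetadata().GetName())
}
```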
- processLabel, mountLabel, err = getSELinuxLabels(nil) - if err != nil { - return nil, err - } - g.SetProcessSelinuxLabel(processLabel) - g.SetLinuxMountLabel(mountLabel) + privileged := s.privilegedSandbox(req) + + securityContext := req.GetConfig().GetLinux().GetSecurityContext() + if securityContext == nil { + logrus.Warn("no security context found in config.") } + processLabel, mountLabel, err = getSELinuxLabels(securityContext.GetSelinuxOptions(), privileged) + if err != nil { + return nil, err + } + + // Don't use SELinux separation with Host Pid or IPC Namespace or privileged. + if securityContext.GetNamespaceOptions().GetHostPid() || securityContext.GetNamespaceOptions().GetHostIpc() { + processLabel, mountLabel = "", "" + } + g.SetProcessSelinuxLabel(processLabel) + g.SetLinuxMountLabel(mountLabel) + // create shm mount for the pod containers. var shmPath string - if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().HostIpc { + if securityContext.GetNamespaceOptions().GetHostIpc() { shmPath = "/dev/shm" } else { shmPath, err = setupShm(podContainer.RunDir, mountLabel) @@ -302,7 +329,14 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest return nil, err } - privileged := s.privilegedSandbox(req) + hostNetwork := securityContext.GetNamespaceOptions().GetHostNetwork() + + hostname, err := getHostname(id, req.GetConfig().Hostname, hostNetwork) + if err != nil { + return nil, err + } + g.SetHostname(hostname) + trusted := s.trustedSandbox(req) g.AddAnnotation(annotations.Metadata, string(metadataJSON)) g.AddAnnotation(annotations.Labels, string(labelsJSON)) @@ -330,7 +364,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest portMappings := convertPortMappings(req.GetConfig().GetPortMappings()) // setup cgroup settings - cgroupParent := req.GetConfig().GetLinux().CgroupParent + cgroupParent := req.GetConfig().GetLinux().GetCgroupParent() if cgroupParent != "" { if s.config.CgroupManager == oci.SystemdCgroupsManager { if len(cgroupParent) <= 6 || !strings.HasSuffix(path.Base(cgroupParent), ".slice") { @@ -383,15 +417,8 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest } // extract linux sysctls from annotations and pass down to oci runtime - safe, unsafe, err := SysctlsFromPodAnnotations(kubeAnnotations) - if err != nil { - return nil, err - } - for _, sysctl := range safe { - g.AddLinuxSysctl(sysctl.Name, sysctl.Value) - } - for _, sysctl := range unsafe { - g.AddLinuxSysctl(sysctl.Name, sysctl.Value) + for key, value := range req.GetConfig().GetLinux().GetSysctls() { + g.AddLinuxSysctl(key, value) } // Set OOM score adjust of the infra container to be very low @@ -400,16 +427,9 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest g.SetLinuxResourcesCPUShares(PodInfraCPUshares) - hostNetwork := req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().HostNetwork - // set up namespaces if hostNetwork { - err = g.RemoveLinuxNamespace("network") - if err != nil { - return nil, err - } - - netNsPath, err = sandbox.HostNetNsPath() + err = g.RemoveLinuxNamespace(string(runtimespec.NetworkNamespace)) if err != nil { return nil, err } @@ -430,23 +450,21 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest }() // Pass the created namespace path to the runtime - err = g.AddOrReplaceLinuxNamespace("network", sb.NetNsPath()) - if err != nil { - return nil, err - } - - netNsPath = sb.NetNsPath() - } - - if 
req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().HostPid { - err = g.RemoveLinuxNamespace("pid") + err = g.AddOrReplaceLinuxNamespace(string(runtimespec.NetworkNamespace), sb.NetNsPath()) if err != nil { return nil, err } } - if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().HostIpc { - err = g.RemoveLinuxNamespace("ipc") + if securityContext.GetNamespaceOptions().GetHostPid() { + err = g.RemoveLinuxNamespace(string(runtimespec.PIDNamespace)) + if err != nil { + return nil, err + } + } + + if securityContext.GetNamespaceOptions().GetHostIpc() { + err = g.RemoveLinuxNamespace(string(runtimespec.IPCNamespace)) if err != nil { return nil, err } @@ -464,49 +482,55 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest g.AddAnnotation(annotations.MountPoint, mountPoint) g.SetRootPath(mountPoint) - container, err := oci.NewContainer(id, containerName, podContainer.RunDir, logPath, sb.NetNs(), labels, kubeAnnotations, "", "", "", nil, id, false, false, false, sb.Privileged(), sb.Trusted(), podContainer.Dir, created, podContainer.Config.Config.StopSignal) + hostnamePath := fmt.Sprintf("%s/hostname", podContainer.RunDir) + if err := ioutil.WriteFile(hostnamePath, []byte(hostname+"\n"), 0644); err != nil { + return nil, err + } + if err := label.Relabel(hostnamePath, mountLabel, true); err != nil && err != unix.ENOTSUP { + return nil, err + } + mnt := runtimespec.Mount{ + Type: "bind", + Source: hostnamePath, + Destination: "/etc/hostname", + Options: []string{"ro", "bind"}, + } + g.AddMount(mnt) + g.AddAnnotation(annotations.HostnamePath, hostnamePath) + sb.AddHostnamePath(hostnamePath) + + container, err := oci.NewContainer(id, containerName, podContainer.RunDir, logPath, sb.NetNs(), labels, g.Spec().Annotations, kubeAnnotations, "", "", "", nil, id, false, false, false, sb.Privileged(), sb.Trusted(), podContainer.Dir, created, podContainer.Config.Config.StopSignal) if err != nil { return nil, err } + container.SetSpec(g.Spec()) container.SetMountPoint(mountPoint) sb.SetInfraContainer(container) var ip string - // setup the network - if !hostNetwork { - podNetwork := newPodNetwork(sb.Namespace(), sb.KubeName(), sb.ID(), netNsPath) - if err = s.netPlugin.SetUpPod(podNetwork); err != nil { - return nil, fmt.Errorf("failed to create pod network sandbox %s(%s): %v", sb.Name(), id, err) - } - - if ip, err = s.netPlugin.GetPodNetworkStatus(podNetwork); err != nil { - return nil, fmt.Errorf("failed to get network status for pod sandbox %s(%s): %v", sb.Name(), id, err) - } - - if len(portMappings) != 0 { - ip4 := net.ParseIP(ip).To4() - if ip4 == nil { - return nil, fmt.Errorf("failed to get valid ipv4 address for sandbox %s(%s)", sb.Name(), id) - } - - if err = s.hostportManager.Add(id, &hostport.PodPortMapping{ - Name: sb.Name(), - PortMappings: portMappings, - IP: ip4, - HostNetwork: false, - }, "lo"); err != nil { - return nil, fmt.Errorf("failed to add hostport mapping for sandbox %s(%s): %v", sb.Name(), id, err) - } - - } - } else { - ip = s.BindAddress() + ip, err = s.networkStart(hostNetwork, sb) + if err != nil { + return nil, err } + defer func() { + if err != nil { + s.networkStop(hostNetwork, sb) + } + }() g.AddAnnotation(annotations.IP, ip) sb.AddIP(ip) + spp := req.GetConfig().GetLinux().GetSecurityContext().GetSeccompProfilePath() + g.AddAnnotation(annotations.SeccompProfilePath, spp) + sb.SetSeccompProfilePath(spp) + if !privileged { + if err = s.setupSeccomp(&g, spp); err != nil { + return nil, err + } + } + err = 
g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions) if err != nil { return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.Name(), id, err) @@ -544,6 +568,23 @@ func convertPortMappings(in []*pb.PortMapping) []*hostport.PortMapping { return out } +func getHostname(id, hostname string, hostNetwork bool) (string, error) { + if hostNetwork { + if hostname == "" { + h, err := os.Hostname() + if err != nil { + return "", err + } + hostname = h + } + } else { + if hostname == "" { + hostname = id[:12] + } + } + return hostname, nil +} + func (s *Server) setPodSandboxMountLabel(id, mountLabel string) error { storageMetadata, err := s.StorageRuntimeServer().GetContainerMetadata(id) if err != nil { @@ -553,31 +594,26 @@ func (s *Server) setPodSandboxMountLabel(id, mountLabel string) error { return s.StorageRuntimeServer().SetContainerMetadata(id, storageMetadata) } -func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mountLabel string, err error) { - processLabel = "" - if selinuxOptions != nil { - user := selinuxOptions.User - if user == "" { - return "", "", fmt.Errorf("SELinuxOption.User is empty") - } - - role := selinuxOptions.Role - if role == "" { - return "", "", fmt.Errorf("SELinuxOption.Role is empty") - } - - t := selinuxOptions.Type - if t == "" { - return "", "", fmt.Errorf("SELinuxOption.Type is empty") - } - - level := selinuxOptions.Level - if level == "" { - return "", "", fmt.Errorf("SELinuxOption.Level is empty") - } - processLabel = fmt.Sprintf("%s:%s:%s:%s", user, role, t, level) +func getSELinuxLabels(selinuxOptions *pb.SELinuxOption, privileged bool) (processLabel string, mountLabel string, err error) { + if privileged { + return "", "", nil } - return label.InitLabels(label.DupSecOpt(processLabel)) + labels := []string{} + if selinuxOptions != nil { + if selinuxOptions.User != "" { + labels = append(labels, "user:"+selinuxOptions.User) + } + if selinuxOptions.Role != "" { + labels = append(labels, "role:"+selinuxOptions.Role) + } + if selinuxOptions.Type != "" { + labels = append(labels, "type:"+selinuxOptions.Type) + } + if selinuxOptions.Level != "" { + labels = append(labels, "level:"+selinuxOptions.Level) + } + } + return label.InitLabels(labels) } func setupShm(podSandboxRunDir, mountLabel string) (shmPath string, err error) { diff --git a/server/sandbox_status.go b/server/sandbox_status.go index f5b6dd09..90193e71 100644 --- a/server/sandbox_status.go +++ b/server/sandbox_status.go @@ -1,6 +1,8 @@ package server import ( + "time" + "github.com/kubernetes-incubator/cri-o/oci" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -8,7 +10,13 @@ import ( ) // PodSandboxStatus returns the Status of the PodSandbox. 
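The hostname handling above writes a file into the sandbox run directory and bind-mounts it read-only at `/etc/hostname` via the generated OCI spec. A condensed sketch of that flow, with SELinux relabeling and annotations omitted; `addHostnameMount` is an illustrative helper, not a CRI-O function:

```
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// addHostnameMount mirrors the flow above: materialize the hostname file
// in the sandbox's run directory, then bind-mount it read-only into the
// container by appending to the OCI runtime spec.
func addHostnameMount(spec *specs.Spec, runDir, hostname string) (string, error) {
	hostnamePath := filepath.Join(runDir, "hostname")
	if err := ioutil.WriteFile(hostnamePath, []byte(hostname+"\n"), 0644); err != nil {
		return "", err
	}
	spec.Mounts = append(spec.Mounts, specs.Mount{
		Type:        "bind",
		Source:      hostnamePath,
		Destination: "/etc/hostname",
		Options:     []string{"ro", "bind"},
	})
	return hostnamePath, nil
}

func main() {
	dir, err := ioutil.TempDir("", "sandbox-run")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	spec := &specs.Spec{}
	path, err := addHostnameMount(spec, dir, "sandbox-host")
	if err != nil {
		panic(err)
	}
	fmt.Println("wrote", path, "and added", len(spec.Mounts), "mount(s)")
}
```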
-func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) { +func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (resp *pb.PodSandboxStatusResponse, err error) { + const operation = "pod_sandbox_status" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("PodSandboxStatusRequest %+v", req) sb, err := s.getPodSandboxFromRequest(req.PodSandboxId) if err != nil { @@ -24,7 +32,7 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR } sandboxID := sb.ID() - resp := &pb.PodSandboxStatusResponse{ + resp = &pb.PodSandboxStatusResponse{ Status: &pb.PodSandboxStatus{ Id: sandboxID, CreatedAt: podInfraContainer.CreatedAt().UnixNano(), diff --git a/server/sandbox_stop.go b/server/sandbox_stop.go index 1f422db5..75e97291 100644 --- a/server/sandbox_stop.go +++ b/server/sandbox_stop.go @@ -2,12 +2,12 @@ package server import ( "fmt" - "os" + "time" "github.com/containers/storage" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/symlink" - "github.com/kubernetes-incubator/cri-o/libkpod/sandbox" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" "github.com/kubernetes-incubator/cri-o/oci" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" @@ -15,12 +15,17 @@ import ( "golang.org/x/net/context" "golang.org/x/sys/unix" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" - "k8s.io/kubernetes/pkg/kubelet/network/hostport" ) // StopPodSandbox stops the sandbox. If there are any running containers in the // sandbox, they should be force terminated. -func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) { +func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (resp *pb.StopPodSandboxResponse, err error) { + const operation = "stop_pod_sandbox" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("StopPodSandboxRequest %+v", req) sb, err := s.getPodSandboxFromRequest(req.PodSandboxId) if err != nil { @@ -32,55 +37,33 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque // the the CRI interface which expects to not error out in not found // cases. 
- resp := &pb.StopPodSandboxResponse{} + resp = &pb.StopPodSandboxResponse{} logrus.Warnf("could not get sandbox %s, it's probably been stopped already: %v", req.PodSandboxId, err) logrus.Debugf("StopPodSandboxResponse %s: %+v", req.PodSandboxId, resp) return resp, nil } if sb.Stopped() { - resp := &pb.StopPodSandboxResponse{} + resp = &pb.StopPodSandboxResponse{} logrus.Debugf("StopPodSandboxResponse %s: %+v", sb.ID(), resp) return resp, nil } - podInfraContainer := sb.InfraContainer() - netnsPath, err := podInfraContainer.NetNsPath() - if err != nil { - return nil, err - } - if _, err := os.Stat(netnsPath); err == nil { - if err2 := s.hostportManager.Remove(sb.ID(), &hostport.PodPortMapping{ - Name: sb.Name(), - PortMappings: sb.PortMappings(), - HostNetwork: false, - }); err2 != nil { - logrus.Warnf("failed to remove hostport for pod sandbox %s(%s): %v", - podInfraContainer.Name(), sb.ID(), err2) - } - - podNetwork := newPodNetwork(sb.Namespace(), sb.KubeName(), sb.ID(), netnsPath) - if err2 := s.netPlugin.TearDownPod(podNetwork); err2 != nil { - logrus.Warnf("failed to destroy network for pod sandbox %s(%s): %v", - sb.Name(), sb.ID(), err2) - } - } else if !os.IsNotExist(err) { // it's ok for netnsPath to *not* exist - return nil, fmt.Errorf("failed to stat netns path for pod sandbox %s(%s) before tearing down the network: %v", - sb.Name(), sb.ID(), err) - } - - // Close the sandbox networking namespace. + // Clean up sandbox networking and close its network namespace. + hostNetwork := sb.NetNsPath() == "" + s.networkStop(hostNetwork, sb) if err := sb.NetNsRemove(); err != nil { return nil, err } + podInfraContainer := sb.InfraContainer() containers := sb.Containers().List() containers = append(containers, podInfraContainer) for _, c := range containers { cStatus := s.Runtime().ContainerStatus(c) if cStatus.Status != oci.ContainerStateStopped { - if err := s.Runtime().StopContainer(c, -1); err != nil { + if err := s.Runtime().StopContainer(ctx, c, 10); err != nil { return nil, fmt.Errorf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.ID(), err) } if c.ID() == podInfraContainer.ID() { @@ -119,7 +102,7 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque } sb.SetStopped() - resp := &pb.StopPodSandboxResponse{} + resp = &pb.StopPodSandboxResponse{} logrus.Debugf("StopPodSandboxResponse %s: %+v", sb.ID(), resp) return resp, nil } diff --git a/server/seccomp/seccomp.go b/server/seccomp/seccomp.go index d8ec63d2..cf77c827 100644 --- a/server/seccomp/seccomp.go +++ b/server/seccomp/seccomp.go @@ -11,6 +11,7 @@ import ( specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" libseccomp "github.com/seccomp/libseccomp-golang" + "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -24,6 +25,7 @@ func IsEnabled() bool { enabled = true } } + logrus.Debugf("seccomp status: %v", enabled) return enabled } diff --git a/server/secrets.go b/server/secrets.go new file mode 100644 index 00000000..56d3ba81 --- /dev/null +++ b/server/secrets.go @@ -0,0 +1,162 @@ +package server + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + rspec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// SecretData info +type SecretData struct { + Name string + Data []byte +} + +// SaveTo saves secret data to given directory +func (s SecretData) SaveTo(dir string) error { + path := 
filepath.Join(dir, s.Name) + if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil && !os.IsExist(err) { + return err + } + return ioutil.WriteFile(path, s.Data, 0700) +} + +func readAll(root, prefix string) ([]SecretData, error) { + path := filepath.Join(root, prefix) + + data := []SecretData{} + + files, err := ioutil.ReadDir(path) + if err != nil { + if os.IsNotExist(err) { + return data, nil + } + + return nil, err + } + + for _, f := range files { + fileData, err := readFile(root, filepath.Join(prefix, f.Name())) + if err != nil { + // If the file did not exist, might be a dangling symlink + // Ignore the error + if os.IsNotExist(err) { + continue + } + return nil, err + } + data = append(data, fileData...) + } + + return data, nil +} + +func readFile(root, name string) ([]SecretData, error) { + path := filepath.Join(root, name) + + s, err := os.Stat(path) + if err != nil { + return nil, err + } + + if s.IsDir() { + dirData, err := readAll(root, name) + if err != nil { + return nil, err + } + return dirData, nil + } + bytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + return []SecretData{{Name: name, Data: bytes}}, nil +} + +// getHostAndCtrDir separates the host:container paths +func getMountsMap(path string) (string, string, error) { + arr := strings.SplitN(path, ":", 2) + if len(arr) == 2 { + return arr[0], arr[1], nil + } + return "", "", errors.Errorf("unable to get host and container dir") +} + +func getHostSecretData(hostDir string) ([]SecretData, error) { + var allSecrets []SecretData + hostSecrets, err := readAll(hostDir, "") + if err != nil { + return nil, errors.Wrapf(err, "failed to read secrets from %q", hostDir) + } + return append(allSecrets, hostSecrets...), nil +} + +// secretMount copies the contents of host directory to container directory +// and returns a list of mounts +func secretMounts(defaultMountsPaths []string, mountLabel, containerWorkingDir string, runtimeMounts []rspec.Mount) ([]rspec.Mount, error) { + var mounts []rspec.Mount + for _, path := range defaultMountsPaths { + hostDir, ctrDir, err := getMountsMap(path) + if err != nil { + return nil, err + } + // skip if the hostDir path doesn't exist + if _, err := os.Stat(hostDir); os.IsNotExist(err) { + logrus.Warnf("%q doesn't exist, skipping", hostDir) + continue + } + + ctrDirOnHost := filepath.Join(containerWorkingDir, ctrDir) + // skip if ctrDir has already been mounted by caller + if isAlreadyMounted(runtimeMounts, ctrDir) { + logrus.Warnf("%q has already been mounted; cannot override mount", ctrDir) + continue + } + + if err := os.RemoveAll(ctrDirOnHost); err != nil { + return nil, fmt.Errorf("remove container directory failed: %v", err) + } + + if err := os.MkdirAll(ctrDirOnHost, 0755); err != nil { + return nil, fmt.Errorf("making container directory failed: %v", err) + } + + hostDir, err = resolveSymbolicLink(hostDir) + if err != nil { + return nil, err + } + + data, err := getHostSecretData(hostDir) + if err != nil { + return nil, errors.Wrapf(err, "getting host secret data failed") + } + for _, s := range data { + s.SaveTo(ctrDirOnHost) + } + label.Relabel(ctrDirOnHost, mountLabel, false) + + m := rspec.Mount{ + Source: ctrDirOnHost, + Destination: ctrDir, + } + + mounts = append(mounts, m) + } + return mounts, nil +} + +func isAlreadyMounted(mounts []rspec.Mount, mountPath string) bool { + for _, mount := range mounts { + if mount.Destination == mountPath { + return true + } + } + return false +} diff --git a/server/secrets_test.go b/server/secrets_test.go new 
file mode 100644 index 00000000..e7e2a205 --- /dev/null +++ b/server/secrets_test.go @@ -0,0 +1,61 @@ +package server + +import ( + "testing" +) + +const ( + defaultError = "unable to get host and container dir" + secretDataPath = "fixtures/secret" + emptyPath = "fixtures/secret/empty" +) + +func TestGetMountsMap(t *testing.T) { + testCases := []struct { + Path, HostDir, CtrDir string + Error string + }{ + {"", "", "", defaultError}, + {"/tmp:/home/crio", "/tmp", "/home/crio", ""}, + {"crio/logs:crio/logs", "crio/logs", "crio/logs", ""}, + {"/tmp", "", "", defaultError}, + } + for _, c := range testCases { + hostDir, ctrDir, err := getMountsMap(c.Path) + if hostDir != c.HostDir || ctrDir != c.CtrDir || (err != nil && err.Error() != c.Error) { + t.Errorf("expect: (%v, %v, %v) \n but got: (%v, %v, %v) \n", + c.HostDir, c.CtrDir, c.Error, hostDir, ctrDir, err) + } + } +} + +func TestGetHostSecretData(t *testing.T) { + testCases := []struct { + Path string + Want []SecretData + }{ + { + "emptyPath", + []SecretData{}, + }, + { + secretDataPath, + []SecretData{ + {"testDataA", []byte("secretDataA")}, + {"testDataB", []byte("secretDataB")}, + }, + }, + } + for _, c := range testCases { + if secretData, err := getHostSecretData(c.Path); err != nil { + t.Error(err) + } else { + for index, data := range secretData { + if data.Name != c.Want[index].Name || string(data.Data) != string(c.Want[index].Data) { + t.Errorf("expect: (%v, %v) \n but got: (%v, %v) \n", + c.Want[index].Name, string(c.Want[index].Data), data.Name, string(data.Data)) + } + } + } + } +} diff --git a/server/server.go b/server/server.go index 50621939..b42496c7 100644 --- a/server/server.go +++ b/server/server.go @@ -15,11 +15,12 @@ import ( "github.com/cri-o/ocicni/pkg/ocicni" "github.com/fsnotify/fsnotify" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/libkpod/sandbox" + "github.com/kubernetes-incubator/cri-o/lib" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" "github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/pkg/storage" "github.com/kubernetes-incubator/cri-o/server/apparmor" + "github.com/kubernetes-incubator/cri-o/server/metrics" "github.com/kubernetes-incubator/cri-o/server/seccomp" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -30,13 +31,12 @@ import ( "k8s.io/kubernetes/pkg/kubelet/server/streaming" iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables" utildbus "k8s.io/kubernetes/pkg/util/dbus" - utilexec "k8s.io/kubernetes/pkg/util/exec" utiliptables "k8s.io/kubernetes/pkg/util/iptables" + utilexec "k8s.io/utils/exec" ) const ( - runtimeAPIVersion = "v1alpha1" - shutdownFile = "/var/lib/crio/crio.shutdown" + shutdownFile = "/var/lib/crio/crio.shutdown" ) func isTrue(annotaton string) bool { @@ -53,7 +53,7 @@ type streamService struct { // Server implements the RuntimeService and ImageService type Server struct { - *libkpod.ContainerServer + *lib.ContainerServer config Config updateLock sync.RWMutex @@ -184,13 +184,13 @@ func New(config *Config) (*Server, error) { return nil, err } - config.ContainerExitsDir = "/var/run/crio/exits" + config.ContainerExitsDir = oci.ContainerExitsDir // This is used to monitor container exits using inotify if err := os.MkdirAll(config.ContainerExitsDir, 0755); err != nil { return nil, err } - containerServer, err := libkpod.New(&config.Config) + containerServer, err := lib.New(&config.Config) if err != nil { return nil, err } @@ -201,11 +201,10 @@ func New(config *Config) (*Server, 
error) { } iptInterface := utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4) iptInterface.EnsureChain(utiliptables.TableNAT, iptablesproxy.KubeMarkMasqChain) - hostportManager := hostport.NewHostportManager() + hostportManager := hostport.NewHostportManager(iptInterface) s := &Server{ ContainerServer: containerServer, - netPlugin: netPlugin, hostportManager: hostportManager, config: *config, @@ -350,6 +349,7 @@ func (s *Server) getPodSandboxFromRequest(podSandboxID string) (*sandbox.Sandbox // CreateMetricsEndpoint creates a /metrics endpoint // for prometheus monitoring func (s *Server) CreateMetricsEndpoint() (*http.ServeMux, error) { + metrics.Register() mux := &http.ServeMux{} mux.Handle("/metrics", prometheus.Handler()) return mux, nil @@ -419,6 +419,7 @@ func (s *Server) StartExitMonitor() { }() if err := watcher.Add(s.config.ContainerExitsDir); err != nil { logrus.Errorf("watcher.Add(%q) failed: %s", s.config.ContainerExitsDir, err) + close(done) } <-done } diff --git a/server/utils.go b/server/utils.go index 26c347f4..0512ea4b 100644 --- a/server/utils.go +++ b/server/utils.go @@ -5,16 +5,23 @@ import ( "io" "os" "strings" + "time" "github.com/cri-o/ocicni/pkg/ocicni" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" + "github.com/kubernetes-incubator/cri-o/server/metrics" + "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-tools/validate" "github.com/syndtr/gocapability/capability" + pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) const ( // According to http://man7.org/linux/man-pages/man5/resolv.conf.5.html: // "The search list is currently limited to six domains with a total of 256 characters." maxDNSSearches = 6 + + maxLabelSize = 4096 ) func copyFile(src, dest string) error { @@ -149,12 +156,12 @@ func SysctlsFromPodAnnotation(annotation string) ([]Sysctl, error) { return sysctls, nil } -func newPodNetwork(namespace, name, id, netns string) ocicni.PodNetwork { +func newPodNetwork(sb *sandbox.Sandbox) ocicni.PodNetwork { return ocicni.PodNetwork{ - Name: name, - Namespace: namespace, - ID: id, - NetNS: netns, + Name: sb.KubeName(), + Namespace: sb.Namespace(), + ID: sb.ID(), + NetNS: sb.NetNsPath(), } } @@ -180,3 +187,69 @@ func getOCICapabilitiesList() []string { } return caps } + +func recordOperation(operation string, start time.Time) { + metrics.CRIOOperations.WithLabelValues(operation).Inc() + metrics.CRIOOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start)) +} + +// recordError records error for metric if an error occurred. 
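`recordOperation` pairs a counter increment with a latency observation on the vectors registered in `server/metrics`. A cut-down, runnable sketch using the same `client_golang` calls; the metric names are shortened stand-ins and the operation label is a toy example:

```
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// Cut-down versions of the CRIOOperations / CRIOOperationsLatency vectors.
var (
	ops = prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "demo_operations", Help: "ops by type"},
		[]string{"operation_type"},
	)
	latency = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{Name: "demo_operations_latency_microseconds", Help: "latency by type"},
		[]string{"operation_type"},
	)
)

func sinceInMicroseconds(start time.Time) float64 {
	return float64(time.Since(start).Nanoseconds() / int64(time.Microsecond))
}

// recordOperation mirrors server/utils.go: one counter bump plus one
// latency observation per operation label.
func recordOperation(operation string, start time.Time) {
	ops.WithLabelValues(operation).Inc()
	latency.WithLabelValues(operation).Observe(sinceInMicroseconds(start))
}

func main() {
	prometheus.MustRegister(ops, latency)
	start := time.Now()
	time.Sleep(5 * time.Millisecond)
	recordOperation("version", start)
	fmt.Println("recorded one operation")
}
```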
+func recordError(operation string, err error) { + if err != nil { + // TODO(runcom): handle timeout from ctx as well + metrics.CRIOOperationsErrors.WithLabelValues(operation).Inc() + } +} + +func validateLabels(labels map[string]string) error { + for k, v := range labels { + if (len(k) + len(v)) > maxLabelSize { + if len(k) > 10 { + k = k[:10] + } + return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s", maxLabelSize, k) + } + } + return nil +} + +func mergeEnvs(imageConfig *v1.Image, kubeEnvs []*pb.KeyValue) []string { + envs := []string{} + if kubeEnvs == nil && imageConfig != nil { + envs = imageConfig.Config.Env + } else { + for _, item := range kubeEnvs { + if item.GetKey() == "" { + continue + } + envs = append(envs, item.GetKey()+"="+item.GetValue()) + } + if imageConfig != nil { + for _, imageEnv := range imageConfig.Config.Env { + var found bool + parts := strings.SplitN(imageEnv, "=", 2) + if len(parts) != 2 { + continue + } + imageEnvKey := parts[0] + if imageEnvKey == "" { + continue + } + for _, kubeEnv := range envs { + kubeEnvKey := strings.SplitN(kubeEnv, "=", 2)[0] + if kubeEnvKey == "" { + continue + } + if imageEnvKey == kubeEnvKey { + found = true + break + } + } + if !found { + envs = append(envs, imageEnv) + } + } + } + } + return envs +} diff --git a/server/utils_test.go b/server/utils_test.go new file mode 100644 index 00000000..f943c2ea --- /dev/null +++ b/server/utils_test.go @@ -0,0 +1,143 @@ +package server + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/opencontainers/image-spec/specs-go/v1" + pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" +) + +const ( + defaultDNSPath = "/etc/resolv.conf" + testDNSPath = "fixtures/resolv_test.conf" + dnsPath = "fixtures/resolv.conf" +) + +func TestParseDNSOptions(t *testing.T) { + testCases := []struct { + Servers, Searches, Options []string + Path string + Want string + }{ + { + []string{}, + []string{}, + []string{}, + testDNSPath, defaultDNSPath, + }, + { + []string{"cri-o.io", "github.com"}, + []string{"192.30.253.113", "192.30.252.153"}, + []string{"timeout:5", "attempts:3"}, + testDNSPath, dnsPath, + }, + } + + for _, c := range testCases { + if err := parseDNSOptions(c.Servers, c.Searches, + c.Options, c.Path); err != nil { + t.Error(err) + } + + expect, _ := ioutil.ReadFile(c.Want) + result, _ := ioutil.ReadFile(c.Path) + if string(expect) != string(result) { + t.Errorf("expect %v: \n but got : %v", string(expect), string(result)) + } + os.Remove(c.Path) + } +} + +func TestSysctlsFromPodAnnotations(t *testing.T) { + testCases := []struct { + Annotations map[string]string + SafeSysctls []Sysctl + UnsafeSysctls []Sysctl + }{ + { + map[string]string{ + "foo-": "bar", + SysctlsPodAnnotationKey: "kernel.shmmax=100000000,safe=20000000", + }, + []Sysctl{ + {"kernel.shmmax", "100000000"}, + {"safe", "20000000"}, + }, + []Sysctl{}, + }, + { + map[string]string{ + UnsafeSysctlsPodAnnotationKey: "kernel.shmmax=10,unsafe=20", + }, + []Sysctl{}, + []Sysctl{ + {"kernel.shmmax", "10"}, + {"unsafe", "20"}, + }, + }, + { + map[string]string{ + "bar..": "42", + SysctlsPodAnnotationKey: "kernel.shmmax=20000000,safe=40000000", + UnsafeSysctlsPodAnnotationKey: "kernel.shmmax=10,unsafe=20", + }, + []Sysctl{ + {"kernel.shmmax", "20000000"}, + {"safe", "40000000"}, + }, + []Sysctl{ + {"kernel.shmmax", "10"}, + {"unsafe", "20"}, + }, + }, + } + + for _, c := range testCases { + safe, unsafe, err := SysctlsFromPodAnnotations(c.Annotations) + if err != nil { + t.Error(err) + } + for 
index, sysctl := range safe {
+			if sysctl.Name != c.SafeSysctls[index].Name || sysctl.Value != c.SafeSysctls[index].Value {
+				t.Errorf("Expect safe: %v, but got: %v\n", c.SafeSysctls[index], sysctl)
+			}
+		}
+		for index, sysctl := range unsafe {
+			if sysctl.Name != c.UnsafeSysctls[index].Name || sysctl.Value != c.UnsafeSysctls[index].Value {
+				t.Errorf("Expect unsafe: %v, but got: %v\n", c.UnsafeSysctls[index], sysctl)
+			}
+		}
+	}
+}
+
+func TestMergeEnvs(t *testing.T) {
+	configImage := &v1.Image{
+		Config: v1.ImageConfig{
+			Env: []string{"VAR1=1", "VAR2=2"},
+		},
+	}
+
+	configKube := []*pb.KeyValue{
+		{
+			Key:   "VAR2",
+			Value: "3",
+		},
+		{
+			Key:   "VAR3",
+			Value: "3",
+		},
+	}
+
+	mergedEnvs := mergeEnvs(configImage, configKube)
+
+	if len(mergedEnvs) != 3 {
+		t.Fatalf("Expected 3 env vars, VAR1=1, VAR2=3 and VAR3=3, found %d", len(mergedEnvs))
+	}
+	for _, env := range mergedEnvs {
+		if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" {
+			t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env)
+		}
+	}
+}
diff --git a/server/version.go b/server/version.go
index d55cd046..74f4799b 100644
--- a/server/version.go
+++ b/server/version.go
@@ -1,29 +1,35 @@
 package server
 
 import (
+	"time"
+
+	"github.com/kubernetes-incubator/cri-o/version"
 	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
-// Version returns the runtime name, runtime version and runtime API version
-func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (*pb.VersionResponse, error) {
-
-	runtimeVersion, err := s.Runtime().Version()
-	if err != nil {
-		return nil, err
-	}
-
+const (
+	// kubeAPIVersion is the api version of kubernetes.
 	// TODO: Track upstream code. For now it expects 0.1.0
-	version := "0.1.0"
+	kubeAPIVersion = "0.1.0"
+	// containerName is the name prepended in kubectl describe->Container ID:
+	// cri-o://
+	containerName = "cri-o"
+	runtimeAPIVersion = "v1alpha1"
+)
 
-	// taking const address
-	rav := runtimeAPIVersion
-	runtimeName := s.Runtime().Name()
+// Version returns the runtime name, runtime version and runtime API version
+func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (resp *pb.VersionResponse, err error) {
+	const operation = "version"
+	defer func() {
+		recordOperation(operation, time.Now())
+		recordError(operation, err)
+	}()
 	return &pb.VersionResponse{
-		Version:           version,
-		RuntimeName:       runtimeName,
-		RuntimeVersion:    runtimeVersion,
-		RuntimeApiVersion: rav,
+		Version:           kubeAPIVersion,
+		RuntimeName:       containerName,
+		RuntimeVersion:    version.Version,
+		RuntimeApiVersion: runtimeAPIVersion,
 	}, nil
 }
diff --git a/test/README.md b/test/README.md
index 1dd2e3c7..1d1742b0 100644
--- a/test/README.md
+++ b/test/README.md
@@ -41,11 +41,12 @@ You will also need to install the [CNI](https://github.com/containernetworking/c
 the default pod test template runs without host networking:
 
 ```
-$ go get github.com/containernetworking/cni
-$ cd "$GOPATH/src/github.com/containernetworking/cni"
-$ git checkout -q d4bbce1865270cd2d2be558d6a23e63d314fe769
-$ ./build.sh \
-$ mkdir -p /opt/cni/bin \
+$ cd "$GOPATH/src/github.com/containernetworking"
+$ git clone https://github.com/containernetworking/plugins.git
+$ cd plugins
+$ git checkout -q dcf7368eeab15e2affc6256f0bb1e84dd46a34de
+$ ./build.sh
+$ mkdir -p /opt/cni/bin
 $ cp bin/* /opt/cni/bin/
 ```
 
@@ -69,17 +70,16 @@ Tests on the host will run with `runc` as the default runtime. However you
 can select other OCI compatible runtimes by setting the `RUNTIME`
 environment variable.
-For example one could use the [Clear Containers](https://github.com/01org/cc-oci-runtime/wiki/Installation) +For example one could use the [Clear Containers](https://github.com/clearcontainers/runtime) runtime instead of `runc`: ``` -make localintegration RUNTIME=cc-oci-runtime +make localintegration RUNTIME=cc-runtime ``` ## Writing integration tests -[Helper functions] -(https://github.com/kubernetes-incubator/crio/blob/master/test/helpers.bash) +[Helper functions](https://github.com/kubernetes-incubator/cri-o/blob/master/test/helpers.bash) are provided in order to facilitate writing tests. ```sh @@ -97,9 +97,9 @@ function teardown() { cleanup_test } -@test "crioctl runtimeversion" { +@test "crictl runtimeversion" { start_crio - crioctl runtimeversion + crictl runtimeversion [ "$status" -eq 0 ] } diff --git a/test/apparmor.bats b/test/apparmor.bats index babfb170..65e853e2 100644 --- a/test/apparmor.bats +++ b/test/apparmor.bats @@ -19,20 +19,18 @@ function teardown() { sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname1": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor1.json - run crioctl pod run --name apparmor1 --config "$TESTDIR"/apparmor1.json + run crictl runs "$TESTDIR"/apparmor1.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --name testname1 --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/apparmor1.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" touch test.txt + run crictl exec --sync "$ctr_id" touch test.txt echo "$output" [ "$status" -eq 0 ] - cleanup_ctrs cleanup_pods stop_crio @@ -52,16 +50,15 @@ function teardown() { sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname2": "apparmor-test-deny-write"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor2.json - run crioctl pod run --name apparmor2 --config "$TESTDIR"/apparmor2.json + run crictl runs "$TESTDIR"/apparmor2.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/apparmor2.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" touch test.txt + run crictl exec --sync "$ctr_id" touch test.txt echo "$output" [ "$status" -ne 0 ] [[ "$output" =~ "Permission denied" ]] @@ -86,16 +83,15 @@ function teardown() { sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname3": "apparmor-test-deny-write"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor3.json - run crioctl pod run --name apparmor3 --config "$TESTDIR"/apparmor3.json + run crictl runs "$TESTDIR"/apparmor3.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --name testname3 --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/apparmor3.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" touch test.txt + run crictl exec --sync "$ctr_id" touch test.txt echo "$output" [ "$status" -ne 0 ] [[ "$output" =~ "Permission denied" ]] @@ -119,16 +115,15 @@ function teardown() { sed -e 
's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname4": "not-exists"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor4.json - run crioctl pod run --name apparmor4 --config "$TESTDIR"/apparmor4.json + run crictl runs "$TESTDIR"/apparmor4.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --name testname4 --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/apparmor4.json echo "$output" [ "$status" -ne 0 ] [[ "$output" =~ "Creating container failed" ]] - cleanup_ctrs cleanup_pods stop_crio @@ -148,20 +143,18 @@ function teardown() { sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname5": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor5.json - run crioctl pod run --name apparmor5 --config "$TESTDIR"/apparmor5.json + run crictl runs "$TESTDIR"/apparmor5.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --name testname5 --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/apparmor5.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" touch test.txt + run crictl exec --sync "$ctr_id" touch test.txt echo "$output" [ "$status" -eq 0 ] - cleanup_ctrs cleanup_pods stop_crio diff --git a/test/cgroups.bats b/test/cgroups.bats index 44d1acfc..03990f1f 100644 --- a/test/cgroups.bats +++ b/test/cgroups.bats @@ -11,30 +11,30 @@ function teardown() { skip "pids cgroup controller is not mounted" fi PIDS_LIMIT=1234 start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" pids_limit_config=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin); obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)') echo "$pids_limit_config" > "$TESTDIR"/container_pids_limit.json - run crioctl ctr create --config "$TESTDIR"/container_pids_limit.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_pids_limit.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" cat /sys/fs/cgroup/pids/pids.max + run crictl exec --sync "$ctr_id" cat /sys/fs/cgroup/pids/pids.max echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "1234" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs cleanup_pods stop_crio -} \ No newline at end of file +} diff --git a/test/command.bats b/test/command.bats new file mode 100644 index 00000000..86e58f9d --- /dev/null +++ b/test/command.bats @@ -0,0 +1,12 @@ +#!/usr/bin/env bats + +load helpers + +@test "crio commands" { + run ${CRIO_BINARY} --config /dev/null config > /dev/null + echo "$output" + [ "$status" -eq 0 ] + run ${CRIO_BINARY} badoption > /dev/null + echo "$output" + [ "$status" -ne 0 ] +} diff --git a/test/ctr.bats b/test/ctr.bats index 0707df5d..5f37c708 100644 --- a/test/ctr.bats +++ b/test/ctr.bats @@ -8,31 +8,31 @@ function teardown() { @test "ctr not found correct error message" { 
start_crio - run crioctl ctr status --id randomid + run crictl inspect "container_not_exist" echo "$output" [ "$status" -eq 1 ] - [[ "$output" =~ "container with ID starting with randomid not found" ]] stop_crio } @test "ctr termination reason Completed" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run sleep 5 + run crictl inspect --output yaml "$ctr_id" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Reason: Completed" ]] + [[ "$output" =~ "reason: Completed" ]] cleanup_ctrs cleanup_pods @@ -41,23 +41,24 @@ function teardown() { @test "ctr termination reason Error" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" errorconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["command"] = ["false"]; json.dump(obj, sys.stdout)') echo "$errorconfig" > "$TESTDIR"/container_config_error.json - run crioctl ctr create --config "$TESTDIR"/container_config_error.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_config_error.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run sleep 5 + run crictl inspect --output yaml "$ctr_id" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Reason: Error" ]] + [[ "$output" =~ "reason: Error" ]] cleanup_ctrs cleanup_pods @@ -66,24 +67,24 @@ function teardown() { @test "ctr remove" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr remove --id "$ctr_id" + run crictl rm "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -93,65 +94,69 @@ function teardown() { @test "ctr lifecycle" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod list + run crictl sandboxes --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + [[ "$output" == "$pod_id" ]] + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - 
run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + [[ "$output" == "$ctr_id" ]] + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" + [[ "$output" == "$ctr_id" ]] + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl ctr remove --id "$ctr_id" + [[ "$output" == "$ctr_id" ]] + run crictl rm "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod list + run crictl sandboxes --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl ctr list + [[ "$output" == "$pod_id" ]] + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + [[ "$output" == "" ]] + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod list - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr list + run crictl sandboxes --quiet echo "$output" [ "$status" -eq 0 ] + [[ "$output" == "" ]] cleanup_ctrs cleanup_pods stop_crio @@ -159,31 +164,28 @@ function teardown() { @test "ctr logging" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod list - echo "$output" - [ "$status" -eq 0 ] # Create a new container. newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json) cp "$TESTDATA"/container_config_logging.json "$newconfig" sed -i 's|"%shellcommand%"|"echo here is some output \&\& echo and some from stderr >\&2"|' "$newconfig" - run crioctl ctr create --config "$newconfig" --pod "$pod_id" + run crictl create "$pod_id" "$newconfig" "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" # Ignore errors on stop. 
- run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" [ "$status" -eq 0 ] - run crioctl ctr remove --id "$ctr_id" + run crictl rm "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -191,13 +193,13 @@ function teardown() { logpath="$DEFAULT_LOG_PATH/$pod_id/$ctr_id.log" [ -f "$logpath" ] echo "$logpath :: $(cat "$logpath")" - grep -E "^[^\n]+ stdout here is some output$" "$logpath" - grep -E "^[^\n]+ stderr and some from stderr$" "$logpath" + grep -E "^[^\n]+ stdout F here is some output$" "$logpath" + grep -E "^[^\n]+ stderr F and some from stderr$" "$logpath" - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -208,32 +210,29 @@ function teardown() { @test "ctr logging [tty=true]" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod list - echo "$output" - [ "$status" -eq 0 ] # Create a new container. newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json) cp "$TESTDATA"/container_config_logging.json "$newconfig" sed -i 's|"%shellcommand%"|"echo here is some output"|' "$newconfig" sed -i 's|"tty": false,|"tty": true,|' "$newconfig" - run crioctl ctr create --config "$newconfig" --pod "$pod_id" + run crictl create "$pod_id" "$newconfig" "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" # Ignore errors on stop. - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" [ "$status" -eq 0 ] - run crioctl ctr remove --id "$ctr_id" + run crictl rm "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -241,12 +240,101 @@ function teardown() { logpath="$DEFAULT_LOG_PATH/$pod_id/$ctr_id.log" [ -f "$logpath" ] echo "$logpath :: $(cat "$logpath")" - grep --binary -P "^[^\n]+ stdout here is some output\x0d$" "$logpath" + grep --binary -P "^[^\n]+ stdout F here is some output\x0d$" "$logpath" - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr log max" { + LOG_SIZE_MAX_LIMIT=10000 start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + + # Create a new container. + newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json) + cp "$TESTDATA"/container_config_logging.json "$newconfig" + sed -i 's|"%shellcommand%"|"for i in $(seq 250); do echo $i; done"|' "$newconfig" + run crictl create "$pod_id" "$newconfig" "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + sleep 6 + run crictl inspect "$ctr_id" + [ "$status" -eq 0 ] + run crictl rm "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + # Check that the output is what we expect. 
+ logpath="$DEFAULT_LOG_PATH/$pod_id/$ctr_id.log" + [ -f "$logpath" ] + echo "$logpath :: $(cat "$logpath")" + len=$(wc -l "$logpath" | awk '{print $1}') + [ $len -lt 250 ] + + run crictl stops "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl rms "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr partial line logging" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + + # Create a new container. + newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json) + cp "$TESTDATA"/container_config_logging.json "$newconfig" + sed -i 's|"%shellcommand%"|"echo -n hello"|' "$newconfig" + run crictl create "$pod_id" "$newconfig" "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl stop "$ctr_id" + echo "$output" + # Ignore errors on stop. + run crictl inspect "$ctr_id" + [ "$status" -eq 0 ] + run crictl rm "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + # Check that the output is what we expect. + logpath="$DEFAULT_LOG_PATH/$pod_id/$ctr_id.log" + [ -f "$logpath" ] + echo "$logpath :: $(cat "$logpath")" + grep -E "^[^\n]+ stdout P hello$" "$logpath" + + run crictl stops "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -258,22 +346,24 @@ function teardown() { # regression test for #127 @test "ctrs status for a pod" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] + ctr_id="$output" - run crioctl ctr list --quiet + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - [[ "${output}" != "" ]] + [[ "$output" != "" ]] + [[ "$output" == "$ctr_id" ]] printf '%s\n' "$output" | while IFS= read -r id do - run crioctl ctr status --id "$id" + run crictl inspect "$id" echo "$output" [ "$status" -eq 0 ] done @@ -284,105 +374,101 @@ function teardown() { } @test "ctr list filtering" { + # start 3 redis sandbox + # pod1 ctr1 create & start + # pod2 ctr2 create + # pod3 ctr3 create & start & stop start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod1 + run crictl runs "$TESTDATA"/sandbox1_config.json echo "$output" [ "$status" -eq 0 ] pod1_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod1_id" + run crictl create "$pod1_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox1_config.json echo "$output" [ "$status" -eq 0 ] ctr1_id="$output" - run crioctl ctr start --id "$ctr1_id" + run crictl start "$ctr1_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod2 + run crictl runs "$TESTDATA"/sandbox2_config.json echo "$output" [ "$status" -eq 0 ] pod2_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod2_id" + run crictl create "$pod2_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox2_config.json echo "$output" [ "$status" -eq 0 ] ctr2_id="$output" - run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod3 + run crictl runs "$TESTDATA"/sandbox3_config.json echo "$output" 
[ "$status" -eq 0 ] pod3_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod3_id" + run crictl create "$pod3_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox3_config.json echo "$output" [ "$status" -eq 0 ] ctr3_id="$output" - run crioctl ctr start --id "$ctr3_id" + run crictl start "$ctr3_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr3_id" + run crictl stop "$ctr3_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr list --id "$ctr1_id" --quiet + + run crictl ps --id "$ctr1_id" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - run crioctl ctr list --id "${ctr1_id:0:4}" --quiet + [[ "$output" == "$ctr1_id" ]] + run crictl ps --id "${ctr1_id:0:4}" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - run crioctl ctr list --id "$ctr2_id" --pod "$pod2_id" --quiet + [[ "$output" == "$ctr1_id" ]] + run crictl ps --id "$ctr2_id" --sandbox "$pod2_id" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr2_id" ]] - run crioctl ctr list --id "$ctr2_id" --pod "$pod3_id" --quiet + [[ "$output" == "$ctr2_id" ]] + run crictl ps --id "$ctr2_id" --sandbox "$pod3_id" --quiet echo "$output" [ "$status" -eq 0 ] [[ "$output" == "" ]] - run crioctl ctr list --state created --quiet + run crictl ps --state created --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr2_id" ]] - run crioctl ctr list --state running --quiet + [[ "$output" == "$ctr2_id" ]] + run crictl ps --state running --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - run crioctl ctr list --state stopped --quiet + [[ "$output" == "$ctr1_id" ]] + run crictl ps --state stopped --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr3_id" ]] - run crioctl ctr list --pod "$pod1_id" --quiet + [[ "$output" == "$ctr3_id" ]] + run crictl ps --sandbox "$pod1_id" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - run crioctl ctr list --pod "$pod2_id" --quiet + [[ "$output" == "$ctr1_id" ]] + run crictl ps --sandbox "$pod2_id" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr2_id" ]] - run crioctl ctr list --pod "$pod3_id" --quiet + [[ "$output" == "$ctr2_id" ]] + run crictl ps --sandbox "$pod3_id" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr3_id" ]] - run crioctl pod stop --id "$pod1_id" + [[ "$output" == "$ctr3_id" ]] + run crictl stops "$pod1_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod1_id" + run crictl rms "$pod1_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod stop --id "$pod2_id" + run crictl stops "$pod2_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod2_id" + run crictl rms "$pod2_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod stop --id "$pod3_id" + run crictl stops "$pod3_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod3_id" + run crictl rms "$pod3_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -391,49 +477,64 @@ function teardown() { } @test "ctr list label filtering" { + # start a pod with 3 containers + # ctr1 with labels: group=test container=redis version=v1.0.0 + # ctr2 with labels: group=test container=redis version=v1.0.0 + # ctr3 with labels: group=test container=redis 
version=v1.1.0 start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr1 --label "a=b" --label "c=d" --label "e=f" + + ctrconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["metadata"]["name"] = "ctr1";obj["labels"]["group"] = "test";obj["labels"]["name"] = "ctr1";obj["labels"]["version"] = "v1.0.0"; json.dump(obj, sys.stdout)') + echo "$ctrconfig" > "$TESTDATA"/labeled_container_redis.json + run crictl create "$pod_id" "$TESTDATA"/labeled_container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr1_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr2 --label "a=b" --label "c=d" + + ctrconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["metadata"]["name"] = "ctr2";obj["labels"]["group"] = "test";obj["labels"]["name"] = "ctr2";obj["labels"]["version"] = "v1.0.0"; json.dump(obj, sys.stdout)') + echo "$ctrconfig" > "$TESTDATA"/labeled_container_redis.json + run crictl create "$pod_id" "$TESTDATA"/labeled_container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr2_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr3 --label "a=b" + + ctrconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["metadata"]["name"] = "ctr3";obj["labels"]["group"] = "test";obj["labels"]["name"] = "ctr3";obj["labels"]["version"] = "v1.1.0"; json.dump(obj, sys.stdout)') + echo "$ctrconfig" > "$TESTDATA"/labeled_container_redis.json + run crictl create "$pod_id" "$TESTDATA"/labeled_container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr3_id="$output" - run crioctl ctr list --label "tier=backend" --label "a=b" --label "c=d" --label "e=f" --quiet + + run crictl ps --label "group=test" --label "name=ctr1" --label "version=v1.0.0" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - run crioctl ctr list --label "tier=frontend" --quiet + [[ "$output" == "$ctr1_id" ]] + run crictl ps --label "group=production" --quiet echo "$output" [ "$status" -eq 0 ] [[ "$output" == "" ]] - run crioctl ctr list --label "a=b" --label "c=d" --quiet + run crictl ps --label "group=test" --label "version=v1.0.0" --quiet echo "$output" [ "$status" -eq 0 ] [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - [[ "$output" =~ "$ctr2_id" ]] - run crioctl ctr list --label "a=b" --quiet + [[ "$output" =~ "$ctr1_id" ]] + [[ "$output" =~ "$ctr2_id" ]] + [[ "$output" != "$ctr3_id" ]] + run crictl ps --label "group=test" --quiet echo "$output" [ "$status" -eq 0 ] [[ "$output" != "" ]] [[ "$output" =~ "$ctr1_id" ]] [[ "$output" =~ "$ctr2_id" ]] [[ "$output" =~ "$ctr3_id" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -443,23 +544,23 @@ function teardown() { @test "ctr metadata in list & status" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] 
pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr list --id "$ctr_id" + run crictl ps --id "$ctr_id" --output yaml echo "$output" [ "$status" -eq 0 ] # TODO: expected value should not hard coded here - [[ "$output" =~ "Name: container1" ]] - [[ "$output" =~ "Attempt: 1" ]] + [[ "$output" =~ "name: container1" ]] + [[ "$output" =~ "attempt: 1" ]] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] # TODO: expected value should not hard coded here @@ -473,21 +574,21 @@ function teardown() { @test "ctr execsync conflicting with conmon flags parsing" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" sh -c "echo hello world" + run crictl exec --sync "$ctr_id" sh -c "echo hello world" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "hello world" ]] + [[ "$output" == "hello world" ]] cleanup_ctrs cleanup_pods stop_crio @@ -495,28 +596,29 @@ function teardown() { @test "ctr execsync" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" echo HELLO + run crictl exec --sync "$ctr_id" echo HELLO echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "HELLO" ]] - run crioctl ctr execsync --id "$ctr_id" --timeout 1 sleep 10 + [[ "$output" == "HELLO" ]] + run crictl exec --sync --timeout 1 "$ctr_id" sleep 3 echo "$output" [[ "$output" =~ "command timed out" ]] - run crioctl pod stop --id "$pod_id" + [ "$status" -ne 0 ] + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -526,25 +628,50 @@ function teardown() { @test "ctr device add" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis_device.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis_device.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" ls /dev/mynull + run crictl exec --sync "$ctr_id" ls /dev/mynull echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "/dev/mynull" ]] - run 
crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr hostname env" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl exec --sync "$ctr_id" env + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "HOSTNAME" ]] + run crictl stops "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -554,18 +681,18 @@ function teardown() { @test "ctr execsync failure" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" doesnotexist + run crictl exec --sync "$ctr_id" doesnotexist echo "$output" [ "$status" -ne 0 ] @@ -576,18 +703,18 @@ function teardown() { @test "ctr execsync exit code" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" false + run crictl exec --sync "$ctr_id" false echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "Exit code: 1" ]] @@ -598,39 +725,39 @@ function teardown() { @test "ctr execsync std{out,err}" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" echo hello0 stdout + run crictl exec --sync "$ctr_id" echo hello0 stdout echo "$output" [ "$status" -eq 0 ] - [[ "$output" == *"$(printf "Stdout:\nhello0 stdout")"* ]] + [[ "$output" =~ "hello0 stdout" ]] stderrconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "runcom/stderr-test"; obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)') echo "$stderrconfig" > "$TESTDIR"/container_config_stderr.json - run crioctl ctr create --config "$TESTDIR"/container_config_stderr.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_config_stderr.json 
"$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" stderr + run crictl exec --sync "$ctr_id" stderr echo "$output" [ "$status" -eq 0 ] - [[ "$output" == *"$(printf "Stderr:\nthis goes to stderr")"* ]] - run crioctl pod stop --id "$pod_id" + [[ "$output" =~ "this goes to stderr" ]] + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -640,21 +767,21 @@ function teardown() { @test "ctr stop idempotent" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -665,13 +792,13 @@ function teardown() { @test "ctr caps drop" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" capsconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["linux"]["security_context"]["capabilities"] = {u"add_capabilities": [], u"drop_capabilities": [u"mknod", u"kill", u"sys_chroot", u"setuid", u"setgid"]}; json.dump(obj, sys.stdout)') echo "$capsconfig" > "$TESTDIR"/container_config_caps.json - run crioctl ctr create --config "$TESTDIR"/container_config_caps.json --pod "$pod_id" + run crictl create "$TESTDIR"/container_config_caps.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] @@ -682,16 +809,16 @@ function teardown() { @test "run ctr with image with Config.Volumes" { start_crio - run crioctl image pull gcr.io/k8s-testimages/redis:e2e + run crictl pull gcr.io/k8s-testimages/redis:e2e echo "$output" [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" volumesconfig=$(cat "$TESTDATA"/container_redis.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "gcr.io/k8s-testimages/redis:e2e"; obj["args"] = []; json.dump(obj, sys.stdout)') echo "$volumesconfig" > "$TESTDIR"/container_config_volumes.json - run crioctl ctr create --config "$TESTDIR"/container_config_volumes.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_config_volumes.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] @@ -705,29 +832,36 @@ function teardown() { skip "travis container tests don't support testing OOM" fi start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" oomconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] 
= "mrunalp/oom"; obj["linux"]["resources"]["memory_limit_in_bytes"] = 5120000; obj["command"] = ["/oom"]; json.dump(obj, sys.stdout)') echo "$oomconfig" > "$TESTDIR"/container_config_oom.json - run crioctl ctr create --config "$TESTDIR"/container_config_oom.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_config_oom.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] # Wait for container to OOM - run sleep 100 - run crioctl ctr status --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] + attempt=0 + while [ $attempt -le 100 ]; do + attempt=$((attempt+1)) + run crictl inspect --output yaml "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + if [[ "$output" =~ "OOMKilled" ]]; then + break + fi + sleep 10 + done [[ "$output" =~ "OOMKilled" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -737,37 +871,34 @@ function teardown() { @test "ctr /etc/resolv.conf rw/ro mode" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config_resolvconf.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config_resolvconf.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Status: CONTAINER_EXITED" ]] + [[ "$output" =~ "State: CONTAINER_EXITED" ]] [[ "$output" =~ "Exit Code: 0" ]] - [[ "$output" =~ "Reason: Completed" ]] - run crioctl ctr create --name roctr --config "$TESTDATA"/container_config_resolvconf_ro.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config_resolvconf_ro.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Status: CONTAINER_EXITED" ]] - [[ "$output" =~ "Exit Code: 1" ]] - [[ "$output" =~ "Reason: Error" ]] + [[ "$output" =~ "State: CONTAINER_EXITED" ]] cleanup_ctrs cleanup_pods @@ -776,19 +907,19 @@ function teardown() { @test "ctr create with non-existent command" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" newconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["command"] = ["nonexistent"]; json.dump(obj, sys.stdout)') echo "$newconfig" > "$TESTDIR"/container_nonexistent.json - run crioctl ctr create --config "$TESTDIR"/container_nonexistent.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_nonexistent.json "$TESTDATA"/sandbox_config.json [ "$status" -ne 0 ] [[ "$output" =~ "executable file not found" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - 
run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -798,22 +929,163 @@ function teardown() { @test "ctr create with non-existent command [tty]" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" newconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["command"] = ["nonexistent"]; obj["tty"] = True; json.dump(obj, sys.stdout)') echo "$newconfig" > "$TESTDIR"/container_nonexistent.json - run crioctl ctr create --config "$TESTDIR"/container_nonexistent.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_nonexistent.json "$TESTDATA"/sandbox_config.json [ "$status" -ne 0 ] [[ "$output" =~ "executable file not found" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs cleanup_pods stop_crio } + +@test "ctr update resources" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/memory/memory.limit_in_bytes" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "209715200" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.shares" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "512" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.cfs_period_us" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "10000" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "20000" ]] + + run crictl update --memory 524288000 --cpu-period 20000 --cpu-quota 10000 --cpu-share 256 "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/memory/memory.limit_in_bytes" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "524288000" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.shares" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "256" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.cfs_period_us" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "20000" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "10000" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr correctly setup working directory" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + notexistcwd=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["working_dir"] = "/thisshouldntexistatall"; json.dump(obj, sys.stdout)') + echo "$notexistcwd" > "$TESTDIR"/container_cwd_notexist.json + run crictl create "$pod_id" "$TESTDIR"/container_cwd_notexist.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" 
-eq 0 ] + + filecwd=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["working_dir"] = "/etc/passwd"; obj["metadata"]["name"] = "container2"; json.dump(obj, sys.stdout)') + echo "$filecwd" > "$TESTDIR"/container_cwd_file.json + run crictl create "$pod_id" "$TESTDIR"/container_cwd_file.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -ne 0 ] + ctr_id="$output" + [[ "$output" =~ "not a directory" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr execsync conflicting with conmon env" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis_env_custom.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl exec "$ctr_id" env + echo "$output" + echo "$status" + [ "$status" -eq 0 ] + [[ "$output" =~ "acustompathinpath" ]] + run crictl exec --sync "$ctr_id" env + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "acustompathinpath" ]] + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr resources" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpuset/cpuset.cpus" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "0-1" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpuset/cpuset.mems" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "0" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} diff --git a/test/ctr_seccomp.bats b/test/ctr_seccomp.bats new file mode 100644 index 00000000..2dae19d1 --- /dev/null +++ b/test/ctr_seccomp.bats @@ -0,0 +1,221 @@ +#!/usr/bin/env bats + +load helpers + +function teardown() { + cleanup_test +} + +# 1. test running with ctr unconfined +# test that we can run with a syscall which would be otherwise blocked +@test "ctr seccomp profiles unconfined" { + # this test requires seccomp, so skip this test if seccomp is not enabled. + enabled=$(is_seccomp_enabled) + if [[ "$enabled" -eq 0 ]]; then + skip "skip this test since seccomp is not enabled." + fi + + sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json + sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json + sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json + + start_crio "$TESTDIR"/seccomp_profile1.json + + sed -e 's/%VALUE%/unconfined/g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp1.json + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDIR"/seccomp1.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl exec --sync "$ctr_id" chmod 777 . + echo "$output" + [ "$status" -eq 0 ] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +# 2. 
test running with ctr runtime/default
+# test that we cannot run with a syscall blocked by the default seccomp profile
+@test "ctr seccomp profiles runtime/default" {
+    # this test requires seccomp, so skip this test if seccomp is not enabled.
+    enabled=$(is_seccomp_enabled)
+    if [[ "$enabled" -eq 0 ]]; then
+        skip "skip this test since seccomp is not enabled."
+    fi
+
+    sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json
+    sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+    sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+    start_crio "$TESTDIR"/seccomp_profile1.json
+
+    sed -e 's/%VALUE%/runtime\/default/g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp2.json
+    run crictl runs "$TESTDATA"/sandbox_config.json
+    echo "$output"
+    [ "$status" -eq 0 ]
+    pod_id="$output"
+    run crictl create "$pod_id" "$TESTDIR"/seccomp2.json "$TESTDATA"/sandbox_config.json
+    echo "$output"
+    [ "$status" -eq 0 ]
+    ctr_id="$output"
+    run crictl start "$ctr_id"
+    echo "$output"
+    [ "$status" -eq 0 ]
+    run crictl exec --sync "$ctr_id" chmod 777 .
+    echo "$output"
+    [ "$status" -eq 0 ]
+    [[ "$output" =~ "Exit code: 1" ]]
+    [[ "$output" =~ "Operation not permitted" ]]
+
+    cleanup_ctrs
+    cleanup_pods
+    stop_crio
+}
+
+# 3. test running with ctr unconfined and profile empty
+# test that we can run with a syscall which would be otherwise blocked
+@test "ctr seccomp profiles unconfined by empty field" {
+    # this test requires seccomp, so skip this test if seccomp is not enabled.
+    enabled=$(is_seccomp_enabled)
+    if [[ "$enabled" -eq 0 ]]; then
+        skip "skip this test since seccomp is not enabled."
+    fi
+
+    sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json
+    sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+    sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+    start_crio "$TESTDIR"/seccomp_profile1.json
+
+    sed -e 's/%VALUE%//g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp1.json
+    run crictl runs "$TESTDATA"/sandbox_config.json
+    echo "$output"
+    [ "$status" -eq 0 ]
+    pod_id="$output"
+    run crictl create "$pod_id" "$TESTDIR"/seccomp1.json "$TESTDATA"/sandbox_config.json
+    echo "$output"
+    [ "$status" -eq 0 ]
+    ctr_id="$output"
+    run crictl start "$ctr_id"
+    echo "$output"
+    [ "$status" -eq 0 ]
+    run crictl exec --sync "$ctr_id" chmod 777 .
+    echo "$output"
+    [ "$status" -eq 0 ]
+
+    cleanup_ctrs
+    cleanup_pods
+    stop_crio
+}
+
+# 4. test running with ctr wrong profile name
+@test "ctr seccomp profiles wrong profile name" {
+    # this test requires seccomp, so skip this test if seccomp is not enabled.
+    enabled=$(is_seccomp_enabled)
+    if [[ "$enabled" -eq 0 ]]; then
+        skip "skip this test since seccomp is not enabled."
+    fi
+
+    sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json
+    sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+    sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+    start_crio "$TESTDIR"/seccomp_profile1.json
+
+    sed -e 's/%VALUE%/wontwork/g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp1.json
+    run crictl runs "$TESTDATA"/sandbox_config.json
+    echo "$output"
+    [ "$status" -eq 0 ]
+    pod_id="$output"
+    run crictl create "$pod_id" "$TESTDIR"/seccomp1.json "$TESTDATA"/sandbox_config.json
+    echo "$output"
+    [[ "$status" -ne 0 ]]
+    [[ "$output" =~ "unknown seccomp profile option:" ]]
+    [[ "$output" =~ "wontwork" ]]
+
+    cleanup_ctrs
+    cleanup_pods
+    stop_crio
+}
+
+# 5. 
test running with ctr localhost/profile_name
+@test "ctr seccomp profiles localhost/profile_name" {
+    # this test requires seccomp, so skip this test if seccomp is not enabled.
+    enabled=$(is_seccomp_enabled)
+    if [[ "$enabled" -eq 0 ]]; then
+        skip "skip this test since seccomp is not enabled."
+    fi
+
+    start_crio
+
+    sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json
+    sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+    sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+    sed -e 's@%VALUE%@localhost/'"$TESTDIR"'/seccomp_profile1.json@g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp1.json
+    run crictl runs "$TESTDATA"/sandbox_config.json
+    echo "$output"
+    [ "$status" -eq 0 ]
+    pod_id="$output"
+    run crictl create "$pod_id" "$TESTDIR"/seccomp1.json "$TESTDATA"/sandbox_config.json
+    echo "$output"
+    [ "$status" -eq 0 ]
+    ctr_id="$output"
+    run crictl start "$ctr_id"
+    echo "$output"
+    [ "$status" -eq 0 ]
+    run crictl exec --sync "$ctr_id" chmod 777 .
+    [ "$status" -eq 0 ]
+    [[ "$output" =~ "Exit code: 1" ]]
+    [[ "$output" =~ "Operation not permitted" ]]
+
+    cleanup_ctrs
+    cleanup_pods
+    stop_crio
+}
+
+# 6. test running with ctr docker/default
+# test that we cannot run with a syscall blocked by the default seccomp profile
+@test "ctr seccomp profiles docker/default" {
+    # this test requires seccomp, so skip this test if seccomp is not enabled.
+    enabled=$(is_seccomp_enabled)
+    if [[ "$enabled" -eq 0 ]]; then
+        skip "skip this test since seccomp is not enabled."
+    fi
+
+    sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json
+    sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+    sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+    start_crio "$TESTDIR"/seccomp_profile1.json
+
+    sed -e 's/%VALUE%/docker\/default/g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp2.json
+    run crictl runs "$TESTDATA"/sandbox_config.json
+    echo "$output"
+    [ "$status" -eq 0 ]
+    pod_id="$output"
+    run crictl create "$pod_id" "$TESTDIR"/seccomp2.json "$TESTDATA"/sandbox_config.json
+    echo "$output"
+    [ "$status" -eq 0 ]
+    ctr_id="$output"
+    run crictl start "$ctr_id"
+    echo "$output"
+    [ "$status" -eq 0 ]
+    run crictl exec --sync "$ctr_id" chmod 777 .
+ echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "Exit code: 1" ]] + [[ "$output" =~ "Operation not permitted" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} diff --git a/test/default_mounts.bats b/test/default_mounts.bats new file mode 100644 index 00000000..711aa40c --- /dev/null +++ b/test/default_mounts.bats @@ -0,0 +1,69 @@ +#!/usr/bin/env bats + +load helpers + +IMAGE="redis:alpine" + +function teardown() { + cleanup_test +} + +@test "bind secrets mounts to container" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl pull "$IMAGE" + [ "$status" -eq 0 ] + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl exec --sync "$ctr_id" cat /proc/mounts + echo "$output" + [ "$status" -eq 0 ] + mount_info="$output" + run grep /container/path1 <<< "$mount_info" + echo "$output" + [ "$status" -eq 0 ] + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "default mounts correctly sorted with other mounts" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl pull "$IMAGE" + [ "$status" -eq 0 ] + host_path="$TESTDIR"/clash + mkdir "$host_path" + echo "clashing..." > "$host_path"/clashing.txt + sed -e "s,%HPATH%,$host_path,g" "$TESTDATA"/container_redis_default_mounts.json > "$TESTDIR"/defmounts_pre.json + sed -e 's,%CPATH%,\/container\/path1\/clash,g' "$TESTDIR"/defmounts_pre.json > "$TESTDIR"/defmounts.json + run crictl create "$pod_id" "$TESTDIR"/defmounts.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl exec --sync "$ctr_id" ls -la /container/path1/clash + echo "$output" + [ "$status" -eq 0 ] + run crictl exec --sync "$ctr_id" cat /container/path1/clash/clashing.txt + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "clashing..." ]] + run crictl exec --sync "$ctr_id" ls -la /container/path1 + echo "$output" + [ "$status" -eq 0 ] + run crictl exec --sync "$ctr_id" cat /container/path1/test.txt + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "Testing secrets mounts!" ]] + cleanup_ctrs + cleanup_pods + stop_crio +} diff --git a/test/helpers.bash b/test/helpers.bash index f7c0517b..a0c715e1 100644 --- a/test/helpers.bash +++ b/test/helpers.bash @@ -10,16 +10,14 @@ TESTDATA="${INTEGRATION_ROOT}/testdata" CRIO_ROOT=${CRIO_ROOT:-$(cd "$INTEGRATION_ROOT/../.."; pwd -P)} # Path of the crio binary. -CRIO_BINARY=${CRIO_BINARY:-${CRIO_ROOT}/cri-o/crio} +CRIO_BINARY=${CRIO_BINARY:-${CRIO_ROOT}/cri-o/bin/crio} # Path of the crictl binary. CRICTL_PATH=$(command -v crictl || true) CRICTL_BINARY=${CRICTL_PATH:-/usr/bin/crictl} -# Path to kpod binary. -KPOD_BINARY=${KPOD_BINARY:-${CRIO_ROOT}/cri-o/kpod} # Path of the conmon binary. -CONMON_BINARY=${CONMON_BINARY:-${CRIO_ROOT}/cri-o/conmon/conmon} +CONMON_BINARY=${CONMON_BINARY:-${CRIO_ROOT}/cri-o/bin/conmon} # Path of the pause binary. -PAUSE_BINARY=${PAUSE_BINARY:-${CRIO_ROOT}/cri-o/pause/pause} +PAUSE_BINARY=${PAUSE_BINARY:-${CRIO_ROOT}/cri-o/bin/pause} # Path of the default seccomp profile. SECCOMP_PROFILE=${SECCOMP_PROFILE:-${CRIO_ROOT}/cri-o/seccomp.json} # Name of the default apparmor profile. 
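A quick usage sketch (an editor-added aside, not part of the patch itself): every default in helpers.bash uses `${VAR:-default}` expansion, so a single run can override the binary paths above or the limits added below without editing the file. The install paths in this example are hypothetical:

```sh
# Hypothetical out-of-tree binaries; helpers.bash otherwise falls back to
# the in-tree bin/ locations shown above.
export CRIO_BINARY=/usr/local/bin/crio
export CONMON_BINARY=/usr/local/bin/conmon
# Override the pids-limit and log-size-max knobs that start_crio passes
# through to the crio daemon for this run only.
PIDS_LIMIT=2048 LOG_SIZE_MAX_LIMIT=10000 make localintegration
```
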
@@ -56,20 +54,36 @@ CGROUP_MANAGER=${CGROUP_MANAGER:-cgroupfs} IMAGE_VOLUMES=${IMAGE_VOLUMES:-mkdir} # Container pids limit PIDS_LIMIT=${PIDS_LIMIT:-1024} +# Log size max limit +LOG_SIZE_MAX_LIMIT=${LOG_SIZE_MAX_LIMIT:--1} +# enable share container pid namespace +ENABLE_SHARED_PID_NAMESPACE=${ENABLE_SHARED_PID_NAMESPACE:-false} TESTDIR=$(mktemp -d) +# kpod pull needs a configuration file for shortname pulls +export REGISTRIES_CONFIG_PATH="$INTEGRATION_ROOT/registries.conf" + # Setup default hooks dir HOOKSDIR=$TESTDIR/hooks mkdir ${HOOKSDIR} HOOKS_OPTS="--hooks-dir-path=$HOOKSDIR" +# Setup default secrets mounts +MOUNT_PATH="$TESTDIR/secrets" +mkdir ${MOUNT_PATH} +MOUNT_FILE="${MOUNT_PATH}/test.txt" +touch ${MOUNT_FILE} +echo "Testing secrets mounts!" > ${MOUNT_FILE} + +DEFAULT_MOUNTS_OPTS="--default-mounts=${MOUNT_PATH}:/container/path1" + # We may need to set some default storage options. case "$(stat -f -c %T ${TESTDIR})" in aufs) # None of device mapper, overlay, or aufs can be used dependably over aufs, and of course btrfs and zfs can't, # and we have to explicitly specify the "vfs" driver in order to use it, so do that now. - STORAGE_OPTS=${STORAGE_OPTS:---storage-driver vfs} + STORAGE_OPTIONS=${STORAGE_OPTIONS:---storage-driver vfs} ;; esac @@ -81,7 +95,7 @@ fi CRIO_SOCKET="$TESTDIR/crio.sock" CRIO_CONFIG="$TESTDIR/crio.conf" CRIO_CNI_CONFIG="$TESTDIR/cni/net.d/" -CRIO_CNI_PLUGIN="/opt/cni/bin/" +CRIO_CNI_PLUGIN=${CRIO_CNI_PLUGIN:-/opt/cni/bin/} POD_CIDR="10.88.0.0/16" POD_CIDR_MASK="10.88.*.*" @@ -89,7 +103,7 @@ cp "$CONMON_BINARY" "$TESTDIR/conmon" PATH=$PATH:$TESTDIR -# Make sure we have a copy of the redis:latest image. +# Make sure we have a copy of the redis:alpine image. if ! [ -d "$ARTIFACTS_PATH"/redis-image ]; then mkdir -p "$ARTIFACTS_PATH"/redis-image if ! "$COPYIMG_BINARY" --import-from=docker://redis:alpine --export-to=dir:"$ARTIFACTS_PATH"/redis-image --signature-policy="$INTEGRATION_ROOT"/policy.json ; then @@ -99,19 +113,6 @@ if ! [ -d "$ARTIFACTS_PATH"/redis-image ]; then fi fi -# TODO: remove the code below for redis digested image id when -# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete -# as the digested reference will be auto-stored when pulling the tag -# above -if ! [ -d "$ARTIFACTS_PATH"/redis-image-digest ]; then - mkdir -p "$ARTIFACTS_PATH"/redis-image-digest - if ! "$COPYIMG_BINARY" --import-from=docker://redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --export-to=dir:"$ARTIFACTS_PATH"/redis-image-digest --signature-policy="$INTEGRATION_ROOT"/policy.json ; then - echo "Error pulling docker://redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b" - rm -fr "$ARTIFACTS_PATH"/redis-image-digest - exit 1 - fi -fi - # Make sure we have a copy of the runcom/stderr-test image. if ! [ -d "$ARTIFACTS_PATH"/stderr-test ]; then mkdir -p "$ARTIFACTS_PATH"/stderr-test @@ -157,13 +158,6 @@ function crio() { "$CRIO_BINARY" --listen "$CRIO_SOCKET" "$@" } -# DEPRECATED -OCIC_BINARY=${OCIC_BINARY:-${CRIO_ROOT}/cri-o/crioctl} -# Run crioctl using the binary specified by $OCIC_BINARY. -function crioctl() { - "$OCIC_BINARY" --connect "$CRIO_SOCKET" "$@" -} - # Run crictl using the binary specified by $CRICTL_BINARY. function crictl() { "$CRICTL_BINARY" -r "$CRIO_SOCKET" -i "$CRIO_SOCKET" "$@" @@ -195,9 +189,9 @@ function retry() { false } -# Waits until the given crio becomes reachable. +# Waits until crio becomes reachable. 
function wait_until_reachable() { - retry 15 1 crictl status + retry 15 1 crictl info } # Start crio. @@ -216,19 +210,14 @@ function start_crio() { # Don't forget: bin2img, copyimg, and crio have their own default drivers, so if you override any, you probably need to override them all if ! [ "$3" = "--no-pause-image" ] ; then - "$BIN2IMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --source-binary "$PAUSE_BINARY" + "$BIN2IMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --source-binary "$PAUSE_BINARY" fi - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --add-name=docker.io/library/redis:alpine --signature-policy="$INTEGRATION_ROOT"/policy.json -# TODO: remove the code below for redis:alpine digested image id when -# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete -# as the digested reference will be auto-stored when pulling the tag -# above - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --import-from=dir:"$ARTIFACTS_PATH"/redis-image-digest --add-name=docker.io/library/redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/oom --import-from=dir:"$ARTIFACTS_PATH"/oom-image --add-name=docker.io/library/mrunalp/oom --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/image-volume-test --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --add-name=docker.io/library/mrunalp/image-volume-test --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image --add-name=docker.io/library/busybox:latest --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test --add-name=docker.io/runcom/stderr-test:latest --signature-policy="$INTEGRATION_ROOT"/policy.json - "$CRIO_BINARY" ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --config /dev/null config >$CRIO_CONFIG + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --signature-policy="$INTEGRATION_ROOT"/policy.json + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/mrunalp/oom:latest --import-from=dir:"$ARTIFACTS_PATH"/oom-image --signature-policy="$INTEGRATION_ROOT"/policy.json + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" 
--image-name=docker.io/mrunalp/image-volume-test:latest --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --signature-policy="$INTEGRATION_ROOT"/policy.json + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image --signature-policy="$INTEGRATION_ROOT"/policy.json + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test --signature-policy="$INTEGRATION_ROOT"/policy.json + "$CRIO_BINARY" ${DEFAULT_MOUNTS_OPTS} ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTIONS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --cni-plugin-dir "$CRIO_CNI_PLUGIN" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --enable-shared-pid-namespace=${ENABLE_SHARED_PID_NAMESPACE} --log-size-max "$LOG_SIZE_MAX_LIMIT" --config /dev/null config >$CRIO_CONFIG # Prepare the CNI configuration files, we're running with non host networking by default if [[ -n "$4" ]]; then @@ -238,56 +227,40 @@ function start_crio() { fi ${netfunc} $POD_CIDR - "$CRIO_BINARY" --debug --config "$CRIO_CONFIG" & CRIO_PID=$! + "$CRIO_BINARY" --log-level debug --config "$CRIO_CONFIG" & CRIO_PID=$! wait_until_reachable run crictl inspecti redis:alpine if [ "$status" -ne 0 ] ; then crictl pull redis:alpine fi - REDIS_IMAGEID=$(crictl inspecti redis:alpine | head -1 | sed -e "s/ID: //g") + REDIS_IMAGEID=$(crictl inspecti redis:alpine | grep ^ID: | head -n 1 | sed -e "s/ID: //g") + REDIS_IMAGEREF=$(crictl inspecti redis:alpine | grep ^Digest: | head -n 1 | sed -e "s/Digest: //g") run crictl inspecti mrunalp/oom if [ "$status" -ne 0 ] ; then crictl pull mrunalp/oom fi - # - # - # - # TODO: remove the code below for redis digested image id when - # https://github.com/kubernetes-incubator/cri-o/issues/531 is complete - # as the digested reference will be auto-stored when pulling the tag - # above - # - # - # - REDIS_IMAGEID_DIGESTED="redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b" - run crictl inspecti $REDIS_IMAGEID_DIGESTED - if [ "$status" -ne 0 ]; then - crictl pull $REDIS_IMAGEID_DIGESTED - fi - # - # - # - run crictl inspecti runcom/stderr-test + OOM_IMAGEID=$(crictl inspecti mrunalp/oom | grep ^ID: | head -n 1 | sed -e "s/ID: //g") + run crictl inspecti runcom/stderr-test if [ "$status" -ne 0 ] ; then crictl pull runcom/stderr-test:latest fi - STDERR_IMAGEID=$(crictl inspecti runcom/stderr-test | head -1 | sed -e "s/ID: //g") + STDERR_IMAGEID=$(crictl inspecti runcom/stderr-test | grep ^ID: | head -n 1 | sed -e "s/ID: //g") run crictl inspecti busybox if [ "$status" -ne 0 ] ; then crictl pull busybox:latest fi - BUSYBOX_IMAGEID=$(crictl inspecti busybox | head -1 | sed -e "s/ID: //g") + BUSYBOX_IMAGEID=$(crictl inspecti busybox | grep ^ID: | head -n 1 | sed -e "s/ID: //g") run crictl inspecti mrunalp/image-volume-test if [ "$status" -ne 0 ] ; then crictl pull mrunalp/image-volume-test:latest fi - VOLUME_IMAGEID=$(crictl inspecti mrunalp/image-volume-test | head -1 | sed -e "s/ID: //g") + VOLUME_IMAGEID=$(crictl inspecti mrunalp/image-volume-test | grep ^ID: | head -n 1 | 
sed -e "s/ID: //g") } function cleanup_ctrs() { - run crictl ps --quiet - if [ "$status" -eq 0 ]; then + output=$(crictl ps --quiet) + if [ $? -eq 0 ]; then if [ "$output" != "" ]; then printf '%s\n' "$output" | while IFS= read -r line do @@ -300,8 +273,8 @@ function cleanup_ctrs() { } function cleanup_images() { - run crictl images --quiet - if [ "$status" -eq 0 ]; then + output=$(crictl images --quiet) + if [ $? -eq 0 ]; then if [ "$output" != "" ]; then printf '%s\n' "$output" | while IFS= read -r line do @@ -312,8 +285,8 @@ function cleanup_images() { } function cleanup_pods() { - run crictl sandboxes --quiet - if [ "$status" -eq 0 ]; then + output=$(crictl sandboxes --quiet) + if [ $? -eq 0 ]; then if [ "$output" != "" ]; then printf '%s\n' "$output" | while IFS= read -r line do @@ -432,7 +405,7 @@ EOF } function check_pod_cidr() { - run crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1 + run crictl exec --sync $1 ip addr show dev eth0 scope global 2>&1 echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ $POD_CIDR_MASK ]] @@ -456,7 +429,7 @@ function get_host_ip() { } function ping_pod() { - inet=`crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1 | grep inet` + inet=`crictl exec --sync $1 ip addr show dev eth0 scope global 2>&1 | grep inet` IFS=" " ip=`parse_pod_ip $inet` @@ -467,12 +440,12 @@ function ping_pod() { } function ping_pod_from_pod() { - inet=`crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1 | grep inet` + inet=`crictl exec --sync $1 ip addr show dev eth0 scope global 2>&1 | grep inet` IFS=" " ip=`parse_pod_ip $inet` - run crioctl ctr execsync --id $2 ping -W 1 -c 2 $ip + run crictl exec --sync $2 ping -W 1 -c 2 $ip echo "$output" [ "$status" -eq 0 ] } diff --git a/test/hooks.bats b/test/hooks.bats index 0c1a51ea..2e1b7ae4 100644 --- a/test/hooks.bats +++ b/test/hooks.bats @@ -10,23 +10,23 @@ cp hooks/checkhook.sh ${HOOKSDIR} sed "s|HOOKSDIR|${HOOKSDIR}|" hooks/checkhook.json > ${HOOKSDIR}/checkhook.json @test "pod test hooks" { - run rm -f /run/hookscheck + rm -f /run/hookscheck start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] run cat /run/hookscheck diff --git a/test/image.bats b/test/image.bats index 8ccceefb..52336641 100644 --- a/test/image.bats +++ b/test/image.bats @@ -3,6 +3,8 @@ load helpers IMAGE=kubernetes/pause +SIGNED_IMAGE=registry.access.redhat.com/rhel7-atomic:latest +UNSIGNED_IMAGE=docker.io/library/hello-world:latest function teardown() { cleanup_test @@ -10,12 +12,16 @@ function teardown() { @test "run container in pod with image ID" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" sed -e "s/%VALUE%/$REDIS_IMAGEID/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageid.json - run crioctl ctr create 
--config "$TESTDIR"/ctr_by_imageid.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/ctr_by_imageid.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -23,89 +29,174 @@ function teardown() { stop_crio } -@test "container status return image:tag if created by image ID" { +@test "container status when created by image ID" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" sed -e "s/%VALUE%/$REDIS_IMAGEID/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageid.json - run crioctl ctr create --config "$TESTDIR"/ctr_by_imageid.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/ctr_by_imageid.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" --output yaml echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Image: redis:alpine" ]] + [[ "$output" =~ "image: docker.io/library/redis:alpine" ]] + [[ "$output" =~ "imageRef: $REDIS_IMAGEREF" ]] cleanup_ctrs cleanup_pods stop_crio } -@test "container status return image@digest if created by image ID and digest available" { - skip "depends on https://github.com/kubernetes-incubator/cri-o/issues/531" - +@test "container status when created by image tagged reference" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - sed -e "s/%VALUE%/$REDIS_IMAGEID_DIGESTED/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageid.json + sed -e "s/%VALUE%/redis:alpine/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imagetag.json - run crioctl ctr create --config "$TESTDIR"/ctr_by_imageid.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/ctr_by_imagetag.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" --output yaml echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "ImageRef: redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b" ]] + [[ "$output" =~ "image: docker.io/library/redis:alpine" ]] + [[ "$output" =~ "imageRef: $REDIS_IMAGEREF" ]] cleanup_ctrs cleanup_pods stop_crio } -@test "image pull" { +@test "container status when created by image canonical reference" { + start_crio + + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + + sed -e "s|%VALUE%|$REDIS_IMAGEREF|g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageref.json + + run crictl create "$pod_id" "$TESTDIR"/ctr_by_imageref.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + run crictl inspect "$ctr_id" --output yaml + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "image: docker.io/library/redis:alpine" ]] + [[ "$output" =~ "imageRef: $REDIS_IMAGEREF" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "image pull and list" { start_crio "" "" --no-pause-image - run crioctl image pull "$IMAGE" + run crictl pull "$IMAGE" + echo "$output" + [ "$status" -eq 0 ] + + run crictl images --quiet "$IMAGE" 
+ [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] + imageid="$output" + + run crictl images @"$imageid" + [ "$status" -eq 0 ] + [[ "$output" =~ "$IMAGE" ]] + + run crictl images --quiet "$imageid" + [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] + cleanup_images + stop_crio +} + +@test "image pull with signature" { + start_crio "" "" --no-pause-image + run crictl pull "$SIGNED_IMAGE" echo "$output" [ "$status" -eq 0 ] cleanup_images stop_crio } -@test "image pull and list by digest" { +@test "image pull without signature" { start_crio "" "" --no-pause-image - run crioctl image pull nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + run crictl image pull "$UNSIGNED_IMAGE" + echo "$output" + [ "$status" -ne 0 ] + cleanup_images + stop_crio +} + +@test "image pull and list by tag and ID" { + start_crio "" "" --no-pause-image + run crictl pull "$IMAGE:go" echo "$output" [ "$status" -eq 0 ] - run crioctl image list --quiet nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + run crictl images --quiet "$IMAGE:go" + [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] + imageid="$output" + + run crictl images --quiet @"$imageid" [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] - run crioctl image list --quiet nginx@33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + run crictl images --quiet "$imageid" [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] - run crioctl image list --quiet @33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + cleanup_images + stop_crio +} + +@test "image pull and list by digest and ID" { + start_crio "" "" --no-pause-image + run crictl pull nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + echo "$output" + [ "$status" -eq 0 ] + + run crictl images --quiet nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] + imageid="$output" + + run crictl images --quiet @"$imageid" [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] - run crioctl image list --quiet 33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + run crictl images --quiet "$imageid" [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] @@ -116,18 +207,18 @@ function teardown() { @test "image list with filter" { start_crio "" "" --no-pause-image - run crioctl image pull "$IMAGE" + run crictl pull "$IMAGE" echo "$output" [ "$status" -eq 0 ] - run crioctl image list --quiet "$IMAGE" + run crictl images --quiet "$IMAGE" echo "$output" [ "$status" -eq 0 ] printf '%s\n' "$output" | while IFS= read -r id; do - run crioctl image remove --id "$id" + run crictl rmi "$id" echo "$output" [ "$status" -eq 0 ] done - run crioctl image list --quiet + run crictl images --quiet echo "$output" [ "$status" -eq 0 ] printf '%s\n' "$output" | while IFS= read -r id; do @@ -140,19 +231,19 @@ function teardown() { @test "image list/remove" { start_crio "" "" --no-pause-image - run crioctl image pull "$IMAGE" + run crictl pull "$IMAGE" echo "$output" [ "$status" -eq 0 ] - run crioctl image list --quiet + run crictl images --quiet echo "$output" [ "$status" -eq 0 ] [ "$output" != "" ] printf '%s\n' "$output" | while IFS= read -r id; do - run crioctl image remove --id "$id" + run crictl rmi "$id" echo "$output" [ "$status" -eq 0 ] done - run crioctl image list --quiet + run crictl images --quiet echo "$output" [ "$status" -eq 0 ] [ "$output" = "" ] @@ -166,23 +257,23 @@ function 
teardown() { @test "image status/remove" { start_crio "" "" --no-pause-image - run crioctl image pull "$IMAGE" + run crictl pull "$IMAGE" echo "$output" [ "$status" -eq 0 ] - run crioctl image list --quiet + run crictl images --quiet echo "$output" [ "$status" -eq 0 ] [ "$output" != "" ] printf '%s\n' "$output" | while IFS= read -r id; do - run crioctl image status --id "$id" + run crictl images -v "$id" echo "$output" [ "$status" -eq 0 ] [ "$output" != "" ] - run crioctl image remove --id "$id" + run crictl rmi "$id" echo "$output" [ "$status" -eq 0 ] done - run crioctl image list --quiet + run crictl images --quiet echo "$output" [ "$status" -eq 0 ] [ "$output" = "" ] diff --git a/test/image_remove.bats b/test/image_remove.bats new file mode 100644 index 00000000..54b06c05 --- /dev/null +++ b/test/image_remove.bats @@ -0,0 +1,75 @@ +#!/usr/bin/env bats + +load helpers + +IMAGE=docker.io/kubernetes/pause + +function teardown() { + cleanup_test +} + +@test "image remove with multiple names, by name" { + start_crio "" "" --no-pause-image + # Pull the image, giving it one name. + run crictl pull "$IMAGE" + echo "$output" + [ "$status" -eq 0 ] + # Add a second name to the image. + run "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name="$IMAGE":latest --add-name="$IMAGE":othertag --signature-policy="$INTEGRATION_ROOT"/policy.json + echo "$output" + [ "$status" -eq 0 ] + # Get the list of image names and IDs. + run crictl images -v + echo "$output" + [ "$status" -eq 0 ] + [ "$output" != "" ] + # Cycle through each name, removing it by name. The image that we assigned a second + # name to should still be around when we get to removing its second name. + grep ^RepoTags: <<< "$output" | while read -r header tag ignored ; do + run crictl rmi "$tag" + echo "$output" + [ "$status" -eq 0 ] + done + # List all images and their names. There should be none now. + run crictl images --quiet + echo "$output" + [ "$status" -eq 0 ] + [ "$output" = "" ] + printf '%s\n' "$output" | while IFS= read -r id; do + echo "$id" + done + # All done. + cleanup_images + stop_crio +} + +@test "image remove with multiple names, by ID" { + start_crio "" "" --no-pause-image + # Pull the image, giving it one name. + run crictl pull "$IMAGE" + echo "$output" + [ "$status" -eq 0 ] + # Add a second name to the image. + run "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name="$IMAGE":latest --add-name="$IMAGE":othertag --signature-policy="$INTEGRATION_ROOT"/policy.json + echo "$output" + [ "$status" -eq 0 ] + # Get the list of the image's names and its ID. + run crictl images -v "$IMAGE":latest + echo "$output" + [ "$status" -eq 0 ] + [ "$output" != "" ] + # Try to remove the image using its ID. That should succeed. + grep ^ID: <<< "$output" | while read -r header id ; do + run crictl rmi "$id" + echo "$output" + [ "$status" -eq 0 ] + done + # The image should be gone now. + run crictl images -v "$IMAGE" + echo "$output" + [ "$status" -eq 0 ] + [ "$output" = "" ] + # All done. 
+ cleanup_images + stop_crio +} diff --git a/test/image_volume.bats b/test/image_volume.bats index ff05e9cd..f5b39401 100644 --- a/test/image_volume.bats +++ b/test/image_volume.bats @@ -8,28 +8,28 @@ function teardown() { @test "image volume ignore" { IMAGE_VOLUMES=ignore start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" image_volume_config=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "mrunalp/image-volume-test"; obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)') echo "$image_volume_config" > "$TESTDIR"/container_image_volume.json - run crioctl ctr create --config "$TESTDIR"/container_image_volume.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_image_volume.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" ls /imagevolume + run crictl exec --sync "$ctr_id" ls /imagevolume echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "Exit code: 1" ]] [[ "$output" =~ "ls: /imagevolume: No such file or directory" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -39,27 +39,27 @@ function teardown() { @test "image volume bind" { IMAGE_VOLUMES=bind start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" image_volume_config=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "mrunalp/image-volume-test"; obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)') echo "$image_volume_config" > "$TESTDIR"/container_image_volume.json - run crioctl ctr create --config "$TESTDIR"/container_image_volume.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_image_volume.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" touch /imagevolume/test_file + run crictl exec --sync "$ctr_id" touch /imagevolume/test_file echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Exit code: 0" ]] - run crioctl pod stop --id "$pod_id" + [ "$output" = "" ] + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs diff --git a/test/inspect.bats b/test/inspect.bats new file mode 100644 index 00000000..bb7977e5 --- /dev/null +++ b/test/inspect.bats @@ -0,0 +1,71 @@ +#!/usr/bin/env bats + +load helpers + +function teardown() { + cleanup_test +} + +@test "info inspect" { + start_crio + out=`echo -e "GET /info HTTP/1.1\r\nHost: crio\r\n" | socat - UNIX-CONNECT:$CRIO_SOCKET` + echo "$out" + [[ "$out" =~ "\"cgroup_driver\":\"$CGROUP_MANAGER\"" ]] + [[ "$out" =~ "\"storage_root\":\"$TESTDIR/crio\"" ]] + + stop_crio +} + +@test "ctr inspect" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + 
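+ # "crictl runs" prints the new pod sandbox ID; the inspect output below is checked against it.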
pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + + out=`echo -e "GET /containers/$ctr_id HTTP/1.1\r\nHost: crio\r\n" | socat - UNIX-CONNECT:$CRIO_SOCKET` + echo "$out" + [[ "$out" =~ "\"sandbox\":\"$pod_id\"" ]] + [[ "$out" =~ "\"image\":\"docker.io/library/redis:alpine\"" ]] + [[ "$out" =~ "\"image_ref\":\"$REDIS_IMAGEREF\"" ]] + + run crictl inspect --output json "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "\"id\": \"$ctr_id\"" ]] + [[ "$output" =~ "\"image\": \"docker.io/library/redis:alpine\"" ]] + [[ "$output" =~ "\"imageRef\": \"$REDIS_IMAGEREF\"" ]] + + run crictl inspects --output json "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + + inet=`crictl exec --sync "$ctr_id" ip addr show dev eth0 scope global 2>&1 | grep inet` + + IFS=" " + ip=`parse_pod_ip $inet` + [[ "$out" =~ "\"ip_address\":\"$ip\"" ]] + [[ "$output" =~ "\"ip\": \"$ip\"" ]] + + +# TODO: add some other check based on the json below: +# +# {"name":"k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1","pid":27477,"image":"redis:alpine","created_time":1505223601111546169,"labels":{"batch":"no","type":"small"},"annotations":{"daemon":"crio","owner":"dragon"},"log_path":"/var/log/crio/pods/297d014ba2c54236779da0c2f80dfba45dc31b106e4cd126a1c3c6d78edc2201/81567e9573ea798d6494c9aab156103ee91b72180fd3841a7c24d2ca39886ba2.log","root":"/tmp/tmp.0bkjphWudF/crio/overlay/d7cfc1de83cab9f377a4a1542427d2a019e85a70c1c660a9e6cf9e254df68873/merged","sandbox":"297d014ba2c54236779da0c2f80dfba45dc31b106e4cd126a1c3c6d78edc2201","ip_address":"10.88.9.153"} + + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr inspect not found" { + start_crio + out=`echo -e "GET /containers/notexists HTTP/1.1\r\nHost: crio\r\n" | socat - UNIX-CONNECT:$CRIO_SOCKET` + echo "$out" + [[ "$out" =~ "can't find the container with id notexists" ]] + + stop_crio +} diff --git a/test/kpod.bats b/test/kpod.bats deleted file mode 100644 index 8ac5feda..00000000 --- a/test/kpod.bats +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT --storage-driver vfs" - -function teardown() { - cleanup_test -} - -@test "kpod version test" { - run ${KPOD_BINARY} version - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod pull from docker with tag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull debian:6.0.10 - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi debian:6.0.10 - [ "$status" -eq 0 ] -} - -@test "kpod pull from docker without tag" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull debian - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi debian - [ "$status" -eq 0 ] -} - -@test "kpod pull from a non-docker registry with tag" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull registry.fedoraproject.org/fedora:rawhide - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi registry.fedoraproject.org/fedora:rawhide - [ "$status" -eq 0 ] -} - -@test "kpod pull from a non-docker registry without tag" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull registry.fedoraproject.org/fedora - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi registry.fedoraproject.org/fedora - [ "$status" -eq 0 ] -} - -@test "kpod pull using digest" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull 
alpine@sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi alpine:latest - [ "$status" -eq 0 ] -} - -@test "kpod pull from a non existent image" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull umohnani/get-started - echo "$output" - [ "$status" -ne 0 ] -} - -@test "kpod history default" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} history $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] -} - -@test "kpod history with Go template format" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} history --format "{{.ID}} {{.Created}}" $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] -} - -@test "kpod history human flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} history --human=false $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] -} - -@test "kpod history quiet flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} history -q $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] -} - -@test "kpod history no-trunc flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} history --no-trunc $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] -} - -@test "kpod history json flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} history --format json $IMAGE | python -m json.tool" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] -} - -@test "kpod push to containers/storage" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" containers-storage:[$ROOT]busybox:test - echo "$output" - [ "$status" -eq 0 ] - run crioctl image remove "$IMAGE" - run crioctl image remove busybox:test - stop_crio -} - -@test "kpod push to directory" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run mkdir /tmp/busybox - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" dir:/tmp/busybox - echo "$output" - [ "$status" -eq 0 ] - run crioctl image remove "$IMAGE" - run rm -rf /tmp/busybox - stop_crio -} - -@test "kpod push to docker archive" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" docker-archive:/tmp/busybox-archive:1.26 - echo "$output" - [ "$status" -eq 0 ] - rm /tmp/busybox-archive - run crioctl image remove "$IMAGE" - stop_crio -} - -@test "kpod push to oci without compression" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run mkdir /tmp/oci-busybox - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" oci:/tmp/oci-busybox:"$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run rm -rf /tmp/oci-busybox - run crioctl image remove "$IMAGE" - stop_crio -} - 
-@test "kpod push without signatures" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run mkdir /tmp/busybox - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS push --remove-signatures "$IMAGE" dir:/tmp/busybox - echo "$output" - [ "$status" -eq 0 ] - run rm -rf /tmp/busybox - run crioctl image remove "$IMAGE" - stop_crio -} - -@test "kpod inspect image" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull redis:alpine - [ "$status" -eq 0 ] - run bash -c "${KPOD_BINARY} $KPOD_OPTIONS inspect redis:alpine | python -m json.tool" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi redis:alpine - [ "$status" -eq 0 ] -} - - -@test "kpod inspect non-existent container" { - run ${KPOD_BINARY} $KPOD_OPTIONS inspect 14rcole/non-existent - echo "$output" - [ "$status" -ne 0 ] -} - -@test "kpod inspect with format" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull redis:alpine - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS inspect --format {{.ID}} redis:alpine - [ "$status" -eq 0 ] - inspectOutput="$output" - run ${KPOD_BINARY} $KPOD_OPTIONS images --quiet redis:alpine - [ "$status" -eq 0 ] - [ "$output" = "$inspectOutput" ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi redis:alpine - [ "$status" -eq 0 ] -} - -@test "kpod inspect specified type" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull redis:alpine - [ "$status" -eq 0 ] - run bash -c "${KPOD_BINARY} $KPOD_OPTIONS inspect --type image redis:alpine | python -m json.tool" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi redis:alpine - [ "$status" -eq 0 ] -} - -@test "kpod images" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull debian:6.0.10 - run ${KPOD_BINARY} $KPOD_OPTIONS images - [ "$status" -eq 0 ] -} - -@test "kpod images test valid json" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull debian:6.0.10 - run ${KPOD_BINARY} $KPOD_OPTIONS images --format json - echo "$output" | python -m json.tool - [ "$status" -eq 0 ] -} - -@test "kpod images check name json output" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull debian:6.0.10 - run ${KPOD_BINARY} $KPOD_OPTIONS images --format json - echo "$output" - name=$(echo $output | python -c 'import sys; import json; print(json.loads(sys.stdin.read())[0])["names"][0]') - [ "$name" = "docker.io/library/debian:6.0.10" ] -} diff --git a/test/kpod_diff.bats b/test/kpod_diff.bats deleted file mode 100644 index 50a82d5c..00000000 --- a/test/kpod_diff.bats +++ /dev/null @@ -1,40 +0,0 @@ -#/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS" - -function teardown() { - cleanup_test -} - -@test "test diff of image and parent" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS diff $IMAGE - echo "$output" - [ "$status" -eq 0 ] - echo "$output" - run ${KKPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE -} - -@test "test diff on non-existent layer" { - run ${KPOD_BINARY} $KPOD_OPTIONS diff "abc123" - [ "$status" -ne 0 ] - echo "$output" -} - -@test "test diff with json output" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - # run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} diff --format json $IMAGE | python -m json.tool" - run ${KPOD_BINARY} $KPOD_OPTIONS diff --format json $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KKPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE -} diff --git a/test/kpod_export.bats 
b/test/kpod_export.bats deleted file mode 100644 index 808d39ff..00000000 --- a/test/kpod_export.bats +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="redis:alpine" -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT ${STORAGE_OPTS}" - - -@test "kpod export output flag" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} export -o container.tar "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - [ "$status" -eq 0 ] - cleanup_pods - [ "$status" -eq 0 ] - stop_crio - [ "$status" -eq 0 ] - rm -f container.tar - [ "$status" -eq 0 ] -} diff --git a/test/kpod_load.bats b/test/kpod_load.bats deleted file mode 100644 index 8cf5cf1b..00000000 --- a/test/kpod_load.bats +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS" - -function teardown() { - cleanup_test -} - -@test "kpod load input flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} load -i alpine.tar - echo "$output" - [ "$status" -eq 0 ] - rm -f alpine.tar - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] -} - -@test "kpod load using quiet flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} load -q -i alpine.tar - echo "$output" - [ "$status" -eq 0 ] - rm -f alpine.tar - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] -} - -@test "kpod load non-existent file" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} load -i alpine.tar - echo "$output" - [ "$status" -ne 0 ] -} diff --git a/test/kpod_logs.bats b/test/kpod_logs.bats deleted file mode 100644 index d11b69c1..00000000 --- a/test/kpod_logs.bats +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT ${STORAGE_OPTS}" - -function teardown() { - cleanup_test -} - -@test "display logs for container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS logs "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "tail three lines of logs for container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config 
"$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS logs --tail 3 $ctr_id - echo "$output" - lines=$(echo "$output" | wc -l) - [ "$status" -eq 0 ] - [[ $(wc -l < "$output" ) -le 3 ]] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "display logs for container since a given time" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS logs --since 2017-08-07T10:10:09.056611202-04:00 $ctr_id - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/kpod_mount.bats b/test/kpod_mount.bats deleted file mode 100644 index 517b627c..00000000 --- a/test/kpod_mount.bats +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bats - -function teardown() { - cleanup_test -} - -load helpers - -IMAGE="redis:alpine" -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT ${STORAGE_OPTS}" - -@test "mount" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} mount $ctr_id - echo "$output" - echo ${KPOD_BINARY} ${KPOD_OPTIONS} mount $ctr_id - [ "$status" -eq 0 ] - run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} mount --notruncate | grep $ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} unmount $ctr_id - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} mount $ctr_id - echo "$output" - [ "$status" -eq 0 ] - root="$output" - run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} mount --format=json | python -m json.tool | grep $ctr_id" - echo "$output" - [ "$status" -eq 0 ] - touch $root/foobar - ${KPOD_BINARY} ${KPOD_OPTIONS} unmount $ctr_id - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/kpod_ps.bats b/test/kpod_ps.bats deleted file mode 100644 index 5954be6b..00000000 --- a/test/kpod_ps.bats +++ /dev/null @@ -1,287 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="redis:alpine" -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT ${STORAGE_OPTS}" - -@test "kpod ps with no containers" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod ps default" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps all flag" { - 
start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps --all - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps size flag" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a -s - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --size - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps quiet flag" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a -q - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --quiet - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps latest flag" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps --latest - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -l - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps last flag" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps --last 2 - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -n 2 - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps no-trunc flag" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --no-trunc - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps format flag = 
json" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --format json | python -m json.tool" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps format flag = go template" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --format "table {{.ID}} {{.Image}} {{.Labels}}" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps filter flag - ancestor" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter ancestor=${IMAGE} - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps filter flag - id" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter id="$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps filter flag - status" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter status=running - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} diff --git a/test/kpod_rename.bats b/test/kpod_rename.bats deleted file mode 100644 index 9419ce2d..00000000 --- a/test/kpod_rename.bats +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="redis:alpine" -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS" -NEW_NAME="rename-test" - -function teardown() { - cleanup_test -} - -@test "kpod rename successful" { - start_crio - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - pod_id="$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - 
ctr_id="$output" - run ${KPOD_BINARY} $KPOD_OPTIONS rename "$ctr_id" "$NEW_NAME" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS inspect "$ctr_id" --format {{.Name}} - echo "$output" - [ "$status" -eq 0 ] - [ "$output" == "$NEW_NAME" ] - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/kpod_rm.bats b/test/kpod_rm.bats deleted file mode 100644 index b41f58dd..00000000 --- a/test/kpod_rm.bats +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS --runtime $RUNTIME_BINARY" -function teardown() { - cleanup_test -} - -@test "remove a stopped container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rm "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_pods - stop_crio -} - -@test "refuse to remove a running container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rm "$ctr_id" - echo "$output" - [ "$status" -ne 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "remove a created container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} $KPOD_OPTIONS rm -f "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_pods - stop_crio -} - -@test "remove a running container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rm -f "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_pods - stop_crio -} diff --git a/test/kpod_save.bats b/test/kpod_save.bats deleted file mode 100644 index d77cd6b5..00000000 --- a/test/kpod_save.bats +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS" - -function teardown() { - cleanup_test -} - -@test "kpod save output flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE - [ "$status" -eq 0 ] - rm -f alpine.tar - [ "$status" -eq 0 ] -} - -@test "kpod save using stdout" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} 
pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save > alpine.tar $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE - [ "$status" -eq 0 ] - rm -f alpine.tar - [ "$status" -eq 0 ] -} - -@test "kpod save quiet flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -q -o alpine.tar $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE - [ "$status" -eq 0 ] - rm -f alpine.tar - [ "$status" -eq 0 ] -} - -@test "kpod save non-existent image" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE - echo "$output" - [ "$status" -ne 0 ] -} diff --git a/test/kpod_stats.bats b/test/kpod_stats.bats deleted file mode 100644 index c9f58cc9..00000000 --- a/test/kpod_stats.bats +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS" - -function teardown() { - cleanup_test -} - -@test "stats single output" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS stats --no-stream "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "stats does not output stopped container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} $KPOD_OPTIONS stats --no-stream - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "stats outputs stopped container with all flag" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} $KPOD_OPTIONS stats --no-stream --all - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "stats output only id" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS stats --no-stream --format {{.ID}} "$ctr_id" - [ "$status" -eq 0 ] - # once ps is implemented, run ps -q and see if that equals the output from above - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "stats streaming output" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run timeout 5s bash -c "${KPOD_BINARY} $KPOD_OPTIONS stats --all" - 
echo "$output" - [ "$status" -eq 124 ] #124 is the status set by timeout when it has to kill the command at the end of the given time - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/namespaces.bats b/test/namespaces.bats new file mode 100644 index 00000000..033cbab2 --- /dev/null +++ b/test/namespaces.bats @@ -0,0 +1,45 @@ +#!/usr/bin/env bats + +load helpers + +function teardown() { + cleanup_test +} + +function pid_namespace_test() { + start_crio + + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + [ "$status" -eq 0 ] + + run crictl exec --sync "$ctr_id" cat /proc/1/cmdline + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "${EXPECTED_INIT:-redis}" ]] + + run crictl stops "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl rms "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "pod disable shared pid namespace" { + ENABLE_SHARED_PID_NAMESPACE=false pid_namespace_test +} + +@test "pod enable shared pid namespace" { + ENABLE_SHARED_PID_NAMESPACE=true EXPECTED_INIT=pause pid_namespace_test +} diff --git a/test/network.bats b/test/network.bats index 8121ca48..1ed95ce1 100644 --- a/test/network.bats +++ b/test/network.bats @@ -2,80 +2,137 @@ load helpers +function teardown() { + cleanup_ctrs + cleanup_pods + stop_crio + rm -f /var/lib/cni/networks/crionet_test_args/* + chmod 0755 $CONMON_BINARY + cleanup_test +} + +@test "ensure correct hostname" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + run crictl exec --sync "$ctr_id" sh -c "hostname" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "crictl_host" ]] + run crictl exec --sync "$ctr_id" sh -c "echo \$HOSTNAME" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "crictl_host" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /etc/hostname" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "crictl_host" ]] +} + +@test "ensure correct hostname for hostnetwork:true" { + start_crio + hostnetworkconfig=$(cat "$TESTDATA"/sandbox_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["linux"]["security_context"]["namespace_options"]["host_network"] = True; obj["annotations"] = {}; obj["hostname"] = ""; json.dump(obj, sys.stdout)') + echo "$hostnetworkconfig" > "$TESTDIR"/sandbox_hostnetwork_config.json + run crictl runs "$TESTDIR"/sandbox_hostnetwork_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/sandbox_hostnetwork_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + run crictl exec --sync "$ctr_id" sh -c "hostname" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "$HOSTNAME" ]] + run crictl exec --sync "$ctr_id" sh -c "echo \$HOSTNAME" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "$HOSTNAME" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /etc/hostname" + echo "$output" + [ "$status" -eq 0 ] + 
[[ "$output" =~ "$HOSTNAME" ]] +} + @test "Check for valid pod netns CIDR" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" check_pod_cidr $ctr_id - cleanup_ctrs - cleanup_pods - stop_crio } @test "Ping pod from the host" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" ping_pod $ctr_id - - cleanup_ctrs - cleanup_pods - stop_crio } @test "Ping pod from another pod" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod1_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod1_id" + run crictl create "$pod1_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr1_id="$output" temp_sandbox_conf cni_test - run crioctl pod run --config "$TESTDIR"/sandbox_config_cni_test.json + run crictl runs "$TESTDIR"/sandbox_config_cni_test.json echo "$output" [ "$status" -eq 0 ] pod2_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod2_id" + run crictl create "$pod2_id" "$TESTDATA"/container_redis.json "$TESTDIR"/sandbox_config_cni_test.json echo "$output" [ "$status" -eq 0 ] ctr2_id="$output" ping_pod_from_pod $ctr1_id $ctr2_id - [ "$status" -eq 0 ] ping_pod_from_pod $ctr2_id $ctr1_id - [ "$status" -eq 0 ] - - cleanup_ctrs - cleanup_pods - stop_crio } @test "Ensure correct CNI plugin namespace/name/container-id arguments" { + if [[ ! -e "$CRIO_CNI_PLUGIN"/bridge-custom ]]; then + skip "bridge-custom plugin not available" + fi start_crio "" "" "" "prepare_plugin_test_args_network_conf" - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json [ "$status" -eq 0 ] . 
/tmp/plugin_test_args.out @@ -86,14 +143,11 @@ load helpers [ "$FOUND_K8S_POD_NAME" = "podsandbox1" ] rm -rf /tmp/plugin_test_args.out - - cleanup_pods - stop_crio } @test "Connect to pod hostport from the host" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config_hostport.json + run crictl runs "$TESTDATA"/sandbox_config_hostport.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" @@ -101,20 +155,39 @@ load helpers get_host_ip echo $host_ip - run crioctl ctr create --config "$TESTDATA"/container_config_hostport.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config_hostport.json "$TESTDATA"/sandbox_config_hostport.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run nc -w 5 $host_ip 4888 + run nc -w 5 $host_ip 4888 "$TESTDIR"/sandbox_wrong_cgroup_parent.json start_crio - run crioctl pod run --config "$TESTDIR"/sandbox_wrong_cgroup_parent.json + run crictl runs "$TESTDIR"/sandbox_wrong_cgroup_parent.json echo "$output" [ "$status" -eq 1 ] @@ -350,7 +347,7 @@ function teardown() { echo "$cgroup_parent_config" > "$TESTDIR"/sandbox_systemd_cgroup_parent.json start_crio - run crioctl pod run --config "$TESTDIR"/sandbox_systemd_cgroup_parent.json + run crictl runs "$TESTDIR"/sandbox_systemd_cgroup_parent.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" diff --git a/test/policy.json b/test/policy.json index bb26e57f..ddc46e1a 100644 --- a/test/policy.json +++ b/test/policy.json @@ -3,5 +3,21 @@ { "type": "insecureAcceptAnything" } - ] + ], + "transports": { + "docker": { + "docker.io/library/hello-world": [ + { + "type": "reject" + } + ], + "registry.access.redhat.com": [ + { + "keyType": "GPGKeys", + "type": "signedBy", + "keyData": 
"VGhlIGZvbGxvd2luZyBwdWJsaWMga2V5IGNhbiBiZSB1c2VkIHRvIHZlcmlmeSBSUE0gcGFja2FnZXMgYnVpbHQgYW5kCnNpZ25lZCBieSBSZWQgSGF0LCBJbmMuICBUaGlzIGtleSBpcyB1c2VkIGZvciBwYWNrYWdlcyBpbiBSZWQgSGF0CnByb2R1Y3RzIHNoaXBwZWQgYWZ0ZXIgTm92ZW1iZXIgMjAwOSwgYW5kIGZvciBhbGwgdXBkYXRlcyB0byB0aG9zZQpwcm9kdWN0cy4KClF1ZXN0aW9ucyBhYm91dCB0aGlzIGtleSBzaG91bGQgYmUgc2VudCB0byBzZWN1cml0eUByZWRoYXQuY29tLgoKcHViICA0MDk2Ui9GRDQzMUQ1MSAyMDA5LTEwLTIyIFJlZCBIYXQsIEluYy4gKHJlbGVhc2Uga2V5IDIpIDxzZWN1cml0eUByZWRoYXQuY29tPgoKLS0tLS1CRUdJTiBQR1AgUFVCTElDIEtFWSBCTE9DSy0tLS0tClZlcnNpb246IEdudVBHIHYxLjIuNiAoR05VL0xpbnV4KQoKbVFJTkJFcmdTVHNCRUFDaDJBNGIwTzl0K3Z6QzlWclZ0TDFBS3ZVV2k5T1BDamt2UjdYZDhEdEp4ZWVNWjVlRgowSHR6SUc1OHFEUnlid1VlODlGWnByQjFmZnVVS3pkRStIY0wzRmJOV1NTT1hWalpJZXJzZFh5SDNOdm5MTExGCjBETlJCMml4M2JYRzlSaC9SWHBGc054RHAyQ0VNZFV2YllDekU3OUsxRW5VVFZoMUwwT2YwMjNGdFBTWlhYMGMKdTdQYjVESTVsWDVZZW9YTzZSb29kcklHWUpzVkJRV25yV3c0eE5UY29uVWZOUGswRUdadEVuenZIMnp5UG9KaApYR0YrTmN1OVh3YmFsbllkZTEwT0N2U1dBWjV6VENwb0xNVHZRaldwYkNkV1hKekNtNkcrL2h4OXVwa2U1NDZICjVJanRZbTRkVElWVG5jM3d2RGlPRGdCS1J6T2w5ckVPQ0lnT3VHdER4UnhjUWtqckMreHZnNVZrcW43dkJVeVcKOXBIZWRPVStQb0YzREdPTStkcXYrZU5LQnZoOVlGOXVnRkFRQmtjRzd2aVpndkdFTUdHVXB6TmdON1huUzFnagovRFBvOW1aRVNPWW5LY2V2ZTJ0SUM4N3AyaHFqcnhPSHVJN2ZrWlllTkljQW9hODNyQmx0RlhhQkRZaFdBS1MxClBjWFMxLzdKelAwa3k3ZDBMNlhidS9JZjVrcVdRcEt3VUluWHR5U1JrdXJhVmZ1SzNCcGErWDFYZWNXaTI0SlkKSFZ0bE5YMDI1eHgxZXdWekdOQ1RsV24xc2tRTjJPT29RVFY0QzgvcUZwVFc2RFRXWXVyZDQrZkUwT0pGSlpRRgpidWhmWFl3bVJsVk9nTjVpNzdOVElKWkpRZllGajM4Yy9JdjV2WkJQb2tPNm1mZnJPVHYzTUhXVmdRQVJBUUFCCnRETlNaV1FnU0dGMExDQkpibU11SUNoeVpXeGxZWE5sSUd0bGVTQXlLU0E4YzJWamRYSnBkSGxBY21Wa2FHRjAKTG1OdmJUNkpBallFRXdFQ0FDQUZBa3JnU1RzQ0d3TUdDd2tJQndNQ0JCVUNDQU1FRmdJREFRSWVBUUlYZ0FBSwpDUkFabmkrUi9VTWRVV3pwRC85czVTRlIvWkYzeWpZNVZMVUZMTVhJS1V6dE5OM29jNDVmeUxkVEkzK1VDbEtDCjJ0RXJ1ellqcU5IaHFBRVhhMnNOMWZNcnN1S2VjNjFMbDJOZnZKamtMS0R2Z1ZJaDdrTTdhc2xOWVZPUDZCVGYKQy9KSjcvdWZ6M1VabXlWaUgvV0RsK0FZZGdrM0pxQ0lPNXc1cnlyQzlJeUJ6WXYybTBIcVliV2ZwaFkzdUh3NQp1bjNuZExKY3U4K0JHUDVGK09OUUVHbCtEUkg1OElsOUpwM0h3YlJhN2R2a1BnRWhmRlIrMWhJK0J0dGEyQzdFCjAvMk5LekN4Wnc3THgzUEJSY1U5MllLeWFFaWhmeS9hUUtaQ0F1eWZLaU12c216cys0cG9JWDdJOU5RQ0pweUUKSUdmSU5vWjdWeHFId1JuL2Q1bXcyTVpUSmpielNmK1VtOVlKeUEwaUVFeUQ2cWpyaVdRUmJ1eHBRWG1sQUpiaAo4b2taNGdiVkZ2MUY4TXpLKzRSOFZ2V0owWHhndGlrU283MmZIandoYTdNQWpxRm5PcTZlbzZmRUMvNzVnM05MCkdodDVWZHBHdUhrMHZiZEVOSE1DOHdTOTllNXFYR05EdWVkM2hsVGF2RE1sRUFIbDM0cTJIOW5ha1RHUkY1S2kKSlVmTmgzRFZSR2hnOGNNSXRpMjFuamlSaDdneUZJMk9jY0FUWTdiQlNyNzlKaHVOd2VsSHV4THJDRnBZN1YyNQpPRmt0bDE1alpKYU14dVFCcVlkQmdTYXkyRzBVNkQxKzdWc1d1ZnB6ZC9BYngxL2Mzb2k5WmFKdlcyMmtBZ2dxCmR6ZEEyN1VVWWpXdng0Mnc5bWVuSndoLzBqZVFjVGVjSVVkMGQwckZjdy9jMXB2Z01NbC9RNzN5ektnS1l3PT0KPXpiSEUKLS0tLS1FTkQgUEdQIFBVQkxJQyBLRVkgQkxPQ0stLS0tLQpUaGUgZm9sbG93aW5nIHB1YmxpYyBrZXkgY2FuIGJlIHVzZWQgdG8gdmVyaWZ5IFJQTSBwYWNrYWdlcyBidWlsdCBhbmQKc2lnbmVkIGJ5IFJlZCBIYXQsIEluYy4gIFRoaXMga2V5IGlzIGEgc3VwcG9ydGluZyAoYXV4aWxpYXJ5KSBrZXkgZm9yClJlZCBIYXQgcHJvZHVjdHMgc2hpcHBlZCBhZnRlciBOb3ZlbWJlciAyMDA2IGFuZCBmb3IgYWxsIHVwZGF0ZXMgdG8KdGhvc2UgcHJvZHVjdHMuCgpRdWVzdGlvbnMgYWJvdXQgdGhpcyBrZXkgc2hvdWxkIGJlIHNlbnQgdG8gc2VjdXJpdHlAcmVkaGF0LmNvbS4KCi0tLS0tQkVHSU4gUEdQIFBVQkxJQyBLRVkgQkxPQ0stLS0tLQpWZXJzaW9uOiBHbnVQRyB2MS4yLjYgKEdOVS9MaW51eCkKCm1RR2lCRVZ3REdrUkJBQ3dQaFpJcHZrakk4d1Y5c0ZURG9xeVBMeDF1YjhTZC93K1l1STVPdm00OW12dkVRVlQKVkxnOEZnRTVKbFNUNTlBYnNMRHlWdFJhOUN4SXZONXN5QlZyV1dXdEh0RG5ueWxGQmNxRy9BNkozYkk0RTkvQQpVdFNMNVp4YmF2MCt1dFA2ZjN3T3B4UXJ4YytXSURWZ3B1cmRCS0FRM2Rzb2JHQnF5cGVYNkZYWjV3Q2dvdTZDCnlacEdJQnFvc0phRFdMek5lT2ZiLzcwRC8xdGhMa1F5aFczSko2Y0hDWUpITmZCU2h2YkxXQmY2UzIzMW1nbXUKTXlNbHQ4S2
1pcGM5Yncrc2FhQWtTa1ZzUS9aYmZqcldCN2U1a2JNcnVLTFZySCtuR2hhbWxIWVVHeUFQdHNQZwpVai9OVVNqNUJtckNzT2tNcG40M25nVExzc0U5TUxoU1BqMm5JSEdGdjlCK2lWTHZvbURkd25hQlJnUTFhSzh6Cno2TUFBLzQwNnlmNXlWSi9NbFRXczEvNjhWd0Rob3NjOUJ0VTFWNUlFME5YZ1pVQWZCSnp6ZlZ6ektRcTZ6SjIKZVpzTUxocjk2d2JzVzEzelVadDFpbmcrdWx3aDJlZTRtZXVKcTZoLzk3MUpzcEZZL1hCaGNmcTRxQ05xVmpzcQpTWm5Xb0dkQ082SjhDeFBJZW1EMklVSHpqb3l5ZUVqM1JWeWR1cDZwY1daQW1oemtLclF6VW1Wa0lFaGhkQ3dnClNXNWpMaUFvWVhWNGFXeHBZWEo1SUd0bGVTa2dQSE5sWTNWeWFYUjVRSEpsWkdoaGRDNWpiMjAraUY0RUV4RUMKQUI0RkFrVndER2tDR3dNR0N3a0lCd01DQXhVQ0F3TVdBZ0VDSGdFQ0Y0QUFDZ2tRUldpY2lDK21XT0MxclFDZwpvb05MQ0ZPek5QY3ZoZDlaYThDODAxSG1uc1lBbmlDdzN5enJDcXRqWW54RER4bHVmSDBGVlR3WAo9ZC9ibQotLS0tLUVORCBQR1AgUFVCTElDIEtFWSBCTE9DSy0tLS0tCg==" + } + ] + } + } } diff --git a/test/redhat_sigstore.yaml b/test/redhat_sigstore.yaml new file mode 100644 index 00000000..83552853 --- /dev/null +++ b/test/redhat_sigstore.yaml @@ -0,0 +1,3 @@ +docker: + registry.access.redhat.com: + sigstore: https://access.redhat.com/webassets/docker/content/sigstore diff --git a/test/registries.conf b/test/registries.conf new file mode 100644 index 00000000..f3bf092b --- /dev/null +++ b/test/registries.conf @@ -0,0 +1,9 @@ +[registries.search] +registries = ['registry.access.redhat.com', 'registry.fedoraproject.org', 'docker.io'] + +[registries.insecure] +registries = [] + +#blocked (docker only) +[registries.block] +registries = [] diff --git a/test/restore.bats b/test/restore.bats index 264096ed..09f4f2b2 100644 --- a/test/restore.bats +++ b/test/restore.bats @@ -8,69 +8,71 @@ function teardown() { @test "crio restore" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod list --id "$pod_id" + run crictl sandboxes --quiet --id "$pod_id" echo "$output" [ "$status" -eq 0 ] pod_list_info="$output" - run crioctl pod status --id "$pod_id" + run crictl inspects "$pod_id" echo "$output" [ "$status" -eq 0 ] - pod_status_info="$output" + pod_status_info=`echo "$output" | grep Status` - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr list --id "$ctr_id" + run crictl ps --quiet --id "$ctr_id" echo "$output" [ "$status" -eq 0 ] ctr_list_info="$output" - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] - ctr_status_info="$output" + ctr_status_info=`echo "$output" | grep State` stop_crio start_crio - run crioctl pod list + run crictl sandboxes --quiet echo "$output" [ "$status" -eq 0 ] [[ "${output}" != "" ]] - [[ "${output}" =~ "${pod_id}" ]] + [[ "${output}" == "${pod_id}" ]] - run crioctl pod list --id "$pod_id" + run crictl sandboxes --quiet --id "$pod_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" == "${pod_list_info}" ]] - run crioctl pod status --id "$pod_id" + run crictl inspects "$pod_id" echo "$output" [ "$status" -eq 0 ] + output=`echo "$output" | grep Status` [[ "${output}" == "${pod_status_info}" ]] - run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] [[ "${output}" != "" ]] - [[ "${output}" =~ "${ctr_id}" ]] + [[ "${output}" == "${ctr_id}" ]] - run crioctl ctr list --id "$ctr_id" + run crictl ps --quiet --id "$ctr_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" == "${ctr_list_info}" ]] - run crioctl ctr 
status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] + output=`echo "$output" | grep State` [[ "${output}" == "${ctr_status_info}" ]] cleanup_ctrs @@ -80,12 +82,12 @@ function teardown() { @test "crio restore with bad state and pod stopped" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -96,7 +98,7 @@ function teardown() { start_crio - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -106,17 +108,17 @@ function teardown() { @test "crio restore with bad state and ctr stopped" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -127,7 +129,7 @@ function teardown() { start_crio - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -138,21 +140,21 @@ function teardown() { @test "crio restore with bad state and ctr removed" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr remove --id "$ctr_id" + run crictl rm "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -163,7 +165,7 @@ function teardown() { start_crio - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 1 ] [[ "${output}" =~ "not found" ]] @@ -175,16 +177,16 @@ function teardown() { @test "crio restore with bad state and pod removed" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -195,7 +197,7 @@ function teardown() { start_crio - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -205,22 +207,22 @@ function teardown() { @test "crio restore with bad state" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod status --id "$pod_id" + run crictl inspects "$pod_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" =~ "SANDBOX_READY" ]] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json 
"$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" =~ "CONTAINER_CREATED" ]] @@ -231,33 +233,34 @@ function teardown() { for i in $("$RUNTIME" list -q | xargs); do "$RUNTIME" delete -f $i; done start_crio - run crioctl pod list + run crictl sandboxes --quiet echo "$output" [ "$status" -eq 0 ] [[ "${output}" != "" ]] [[ "${output}" =~ "${pod_id}" ]] - run crioctl pod status --id "$pod_id" + run crictl inspects "$pod_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" =~ "SANDBOX_NOTREADY" ]] - run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] [[ "${output}" != "" ]] [[ "${output}" =~ "${ctr_id}" ]] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" =~ "CONTAINER_EXITED" ]] - [[ "${output}" =~ "Exit Code: 255" ]] + # TODO: may be cri-tool should display Exit Code + #[[ "${output}" =~ "Exit Code: 255" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] diff --git a/test/runtimeversion.bats b/test/runtimeversion.bats index f0d2a436..81d0b531 100644 --- a/test/runtimeversion.bats +++ b/test/runtimeversion.bats @@ -6,9 +6,9 @@ function teardown() { cleanup_test } -@test "crioctl runtimeversion" { +@test "crictl runtimeversion" { start_crio - run crioctl runtimeversion + run crictl info echo "$output" [ "$status" -eq 0 ] stop_crio diff --git a/test/seccomp.bats b/test/seccomp.bats deleted file mode 100644 index 1c6229dc..00000000 --- a/test/seccomp.bats +++ /dev/null @@ -1,330 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -function teardown() { - cleanup_test -} - -# 1. test running with ctr unconfined -# test that we can run with a syscall which would be otherwise blocked -@test "ctr seccomp profiles unconfined" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/k8s_testname_seccomp_1_redhat\.test\.crio_redhat-test-crio_0": "unconfined"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp1.json - run crioctl pod run --name seccomp1 --config "$TESTDIR"/seccomp1.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --name testname --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . - echo "$output" - [ "$status" -eq 0 ] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 2. test running with ctr runtime/default -# test that we cannot run with a syscall blocked by the default seccomp profile -@test "ctr seccomp profiles runtime/default" { - # this test requires seccomp, so skip this test if seccomp is not enabled. 
- enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/k8s_testname2_seccomp2_redhat\.test\.crio_redhat-test-crio_0": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp2.json - run crioctl pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . - echo "$output" - [ "$status" -eq 0 ] - [[ "$output" =~ "Exit code: 1" ]] - [[ "$output" =~ "Operation not permitted" ]] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 3. test running with ctr wrong profile name -@test "ctr seccomp profiles wrong profile name" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/k8s_testname3_seccomp3_redhat\.test\.crio_redhat-test-crio_1": "notgood"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp3.json - run crioctl pod run --name seccomp3 --config "$TESTDIR"/seccomp3.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --name testname3 --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -ne 0 ] - [[ "$output" =~ "unknown seccomp profile option:" ]] - [[ "$output" =~ "notgood" ]] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# TODO(runcom): need https://issues.k8s.io/36997 -# 4. test running with ctr localhost/profile_name -@test "ctr seccomp profiles localhost/profile_name" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - #sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - #sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - #sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - #start_crio "$TESTDIR"/seccomp_profile1.json - - skip "need https://issues.k8s.io/36997" -} - -# 5. test running with unkwown ctr profile falls back to pod profile -# unknown ctr -> unconfined -# pod -> runtime/default -# result: fail chmod -@test "ctr seccomp profiles falls back to pod profile" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." 
- fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.crio-seccomp2-1-testname2-0-not-exists": "unconfined", "security\.alpha\.kubernetes\.io\/seccomp\/pod": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp5.json - run crioctl pod run --name seccomp5 --config "$TESTDIR"/seccomp5.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . - echo "$output" - [ "$status" -eq 0 ] - [[ "$output" =~ "Exit code: 1" ]] - [[ "$output" =~ "Operation not permitted" ]] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 6. test running with unkwown ctr profile and no pod, falls back to unconfined -# unknown ctr -> runtime/default -# pod -> NO -# result: success, running unconfined -@test "ctr seccomp profiles falls back to unconfined" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.crio-seccomp6-1-testname6-0-not-exists": "runtime-default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp6.json - run crioctl pod run --name seccomp6 --config "$TESTDIR"/seccomp6.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --name testname6 --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . - echo "$output" - [ "$status" -eq 0 ] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 1. test running with pod unconfined -# test that we can run with a syscall which would be otherwise blocked -@test "pod seccomp profiles unconfined" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." 
- fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/pod": "unconfined"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp1.json - run crioctl pod run --name seccomp1 --config "$TESTDIR"/seccomp1.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . - echo "$output" - [ "$status" -eq 0 ] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 2. test running with pod runtime/default -# test that we cannot run with a syscall blocked by the default seccomp profile -@test "pod seccomp profiles runtime/default" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/pod": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp2.json - run crioctl pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . - echo "$output" - [ "$status" -eq 0 ] - [[ "$output" =~ "Exit code: 1" ]] - [[ "$output" =~ "Operation not permitted" ]] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 3. test running with pod wrong profile name -@test "pod seccomp profiles wrong profile name" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - # 3. test running with pod wrong profile name - sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/pod": "notgood"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp3.json - run crioctl pod run --name seccomp3 --config "$TESTDIR"/seccomp3.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -ne 0 ] - [[ "$output" =~ "unknown seccomp profile option:" ]] - [[ "$output" =~ "notgood" ]] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# TODO(runcom): need https://issues.k8s.io/36997 -# 4. 
test running with pod localhost/profile_name -@test "pod seccomp profiles localhost/profile_name" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - #sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - #sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - #sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - #start_crio "$TESTDIR"/seccomp_profile1.json - - skip "need https://issues.k8s.io/36997" -} diff --git a/test/selinux.bats b/test/selinux.bats new file mode 100644 index 00000000..b876afc1 --- /dev/null +++ b/test/selinux.bats @@ -0,0 +1,26 @@ +#!/usr/bin/env bats + +load helpers + +function teardown() { + cleanup_test +} + +@test "ctr termination reason Completed" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config_selinux.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config_selinux.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + cleanup_ctrs + cleanup_pods + stop_crio +} diff --git a/test/testdata/README.md b/test/testdata/README.md index afc6b32f..780d5713 100644 --- a/test/testdata/README.md +++ b/test/testdata/README.md @@ -5,11 +5,11 @@ sudo ./crio In terminal 2: ``` -sudo ./crioctl runtimeversion +sudo ./crictl runtimeversion sudo rm -rf /var/lib/containers/storage/sandboxes/podsandbox1 -sudo ./crioctl pod run --config testdata/sandbox_config.json +sudo ./crictl runs testdata/sandbox_config.json sudo rm -rf /var/lib/containers/storage/containers/container1 -sudo ./crioctl container create --pod podsandbox1 --config testdata/container_config.json +sudo ./crictl create podsandbox1 testdata/container_config.json testdata/sandbox_config.json ``` diff --git a/test/testdata/container_config.json b/test/testdata/container_config.json index 28936dff..d8ef76a5 100644 --- a/test/testdata/container_config.json +++ b/test/testdata/container_config.json @@ -51,6 +51,12 @@ }, "security_context": { "readonly_rootfs": false, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + }, "capabilities": { "add_capabilities": [ "setuid", @@ -58,12 +64,6 @@ ], "drop_capabilities": [ ] - }, - "selinux_options": { - "user": "system_u", - "role": "system_r", - "type": "container_t", - "level": "s0:c4,c5" } } } diff --git a/test/testdata/container_config_by_imageid.json b/test/testdata/container_config_by_imageid.json index 25b2b13a..d953efb3 100644 --- a/test/testdata/container_config_by_imageid.json +++ b/test/testdata/container_config_by_imageid.json @@ -62,7 +62,7 @@ "selinux_options": { "user": "system_u", "role": "system_r", - "type": "container_t", + "type": "svirt_lxc_net_t", "level": "s0:c4,c5" } } diff --git a/test/testdata/container_config_hostport.json b/test/testdata/container_config_hostport.json index ddc0014e..e5a0ca67 100644 --- a/test/testdata/container_config_hostport.json +++ b/test/testdata/container_config_hostport.json @@ -64,7 +64,7 @@ "selinux_options": { "user": "system_u", "role": "system_r", - "type": "container_t", + "type": "svirt_lxc_net_t", "level": "s0:c4,c5" } } diff --git a/test/testdata/container_config_logging.json b/test/testdata/container_config_logging.json index 92772818..8e8d0da4 100644 --- 
a/test/testdata/container_config_logging.json +++ b/test/testdata/container_config_logging.json @@ -64,7 +64,7 @@ "selinux_options": { "user": "system_u", "role": "system_r", - "type": "container_t", + "type": "svirt_lxc_net_t", "level": "s0:c4,c5" } } diff --git a/test/testdata/container_config_resolvconf.json b/test/testdata/container_config_resolvconf.json index ad22cf81..80093db1 100644 --- a/test/testdata/container_config_resolvconf.json +++ b/test/testdata/container_config_resolvconf.json @@ -1,6 +1,6 @@ { "metadata": { - "name": "container1", + "name": "test-resolve", "attempt": 1 }, "image": { @@ -64,7 +64,7 @@ "selinux_options": { "user": "system_u", "role": "system_r", - "type": "container_t", + "type": "svirt_lxc_net_t", "level": "s0:c4,c5" } } diff --git a/test/testdata/container_config_resolvconf_ro.json b/test/testdata/container_config_resolvconf_ro.json index 8e866488..faa0f303 100644 --- a/test/testdata/container_config_resolvconf_ro.json +++ b/test/testdata/container_config_resolvconf_ro.json @@ -1,6 +1,6 @@ { "metadata": { - "name": "container1", + "name": "test-resolve-ro", "attempt": 1 }, "image": { @@ -64,7 +64,7 @@ "selinux_options": { "user": "system_u", "role": "system_r", - "type": "container_t", + "type": "svirt_lxc_net_t", "level": "s0:c4,c5" } } diff --git a/test/testdata/container_config_seccomp.json b/test/testdata/container_config_seccomp.json index 9054a2c6..6097050a 100644 --- a/test/testdata/container_config_seccomp.json +++ b/test/testdata/container_config_seccomp.json @@ -6,13 +6,11 @@ "image": { "image": "redis:alpine" }, - "command": [ - "/bin/bash" - ], "args": [ - "/bin/chmod", "777", "." + "docker-entrypoint.sh", + "redis-server" ], - "working_dir": "/", + "working_dir": "/data", "envs": [ { "key": "PATH", @@ -53,6 +51,7 @@ "oom_score_adj": 30 }, "security_context": { + "seccomp_profile_path": "%VALUE%", "capabilities": { "add_capabilities": [ "setuid", @@ -65,7 +64,7 @@ "user": "system_u", "role": "system_r", "type": "svirt_lxc_net_t", - "level": "s0:c4-c5" + "level": "s0:c4,c5" } } } diff --git a/test/testdata/container_config_sleep.json b/test/testdata/container_config_sleep.json new file mode 100644 index 00000000..c86ff701 --- /dev/null +++ b/test/testdata/container_config_sleep.json @@ -0,0 +1,71 @@ +{ + "metadata": { + "name": "container999", + "attempt": 1 + }, + "image": { + "image": "docker.io/library/busybox:latest" + }, + "command": [ + "sleep", + "9999" + ], + "args": [], + "working_dir": "/", + "envs": [ + { + "key": "PATH", + "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + }, + { + "key": "TERM", + "value": "xterm" + }, + { + "key": "TESTDIR", + "value": "test/dir1" + }, + { + "key": "TESTFILE", + "value": "test/file1" + } + ], + "labels": { + "type": "small", + "batch": "no" + }, + "annotations": { + "owner": "dragon", + "daemon": "crio" + }, + "privileged": true, + "log_path": "", + "stdin": false, + "stdin_once": false, + "tty": false, + "linux": { + "resources": { + "cpu_period": 10000, + "cpu_quota": 20000, + "cpu_shares": 512, + "oom_score_adj": 30 + }, + "security_context": { + "readonly_rootfs": false, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + }, + "capabilities": { + "add_capabilities": [ + "setuid", + "setgid" + ], + "drop_capabilities": [ + ] + } + } + } +} diff --git a/test/testdata/container_redis.json b/test/testdata/container_redis.json index 638aba4f..39a0865b 100644 --- a/test/testdata/container_redis.json +++ 
b/test/testdata/container_redis.json @@ -45,10 +45,13 @@ "tty": false, "linux": { "resources": { + "memory_limit_in_bytes": 209715200, "cpu_period": 10000, "cpu_quota": 20000, "cpu_shares": 512, - "oom_score_adj": 30 + "oom_score_adj": 30, + "cpuset_cpus": "0-1", + "cpuset_mems": "0" }, "security_context": { "capabilities": { diff --git a/test/testdata/container_redis_default_mounts.json b/test/testdata/container_redis_default_mounts.json new file mode 100644 index 00000000..dff3db5a --- /dev/null +++ b/test/testdata/container_redis_default_mounts.json @@ -0,0 +1,67 @@ +{ + "metadata": { + "name": "podsandbox1-redis" + }, + "image": { + "image": "redis:alpine" + }, + "args": [ + "docker-entrypoint.sh", + "redis-server" + ], + "mounts": [ + { + "container_path": "%CPATH%", + "host_path": "%HPATH%" + } + ], + "working_dir": "/data", + "envs": [ + { + "key": "PATH", + "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + }, + { + "key": "TERM", + "value": "xterm" + }, + { + "key": "REDIS_VERSION", + "value": "3.2.3" + }, + { + "key": "REDIS_DOWNLOAD_URL", + "value": "http://download.redis.io/releases/redis-3.2.3.tar.gz" + }, + { + "key": "REDIS_DOWNLOAD_SHA1", + "value": "92d6d93ef2efc91e595c8bf578bf72baff397507" + } + ], + "labels": { + "tier": "backend" + }, + "annotations": { + "pod": "podsandbox1" + }, + "readonly_rootfs": false, + "log_path": "", + "stdin": false, + "stdin_once": false, + "tty": false, + "linux": { + "resources": { + "cpu_period": 10000, + "cpu_quota": 20000, + "cpu_shares": 512, + "oom_score_adj": 30 + }, + "security_context": { + "capabilities": { + "add_capabilities": [ + "sys_admin" + ] + } + } + } +} diff --git a/test/testdata/container_redis_env_custom.json b/test/testdata/container_redis_env_custom.json new file mode 100644 index 00000000..3ec41001 --- /dev/null +++ b/test/testdata/container_redis_env_custom.json @@ -0,0 +1,62 @@ +{ + "metadata": { + "name": "podsandbox1-redis" + }, + "image": { + "image": "redis:alpine" + }, + "args": [ + "docker-entrypoint.sh", + "redis-server" + ], + "working_dir": "/data", + "envs": [ + { + "key": "PATH", + "value": "/acustompathinpath:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + }, + { + "key": "TERM", + "value": "xterm" + }, + { + "key": "REDIS_VERSION", + "value": "3.2.3" + }, + { + "key": "REDIS_DOWNLOAD_URL", + "value": "http://download.redis.io/releases/redis-3.2.3.tar.gz" + }, + { + "key": "REDIS_DOWNLOAD_SHA1", + "value": "92d6d93ef2efc91e595c8bf578bf72baff397507" + } + ], + "labels": { + "tier": "backend" + }, + "annotations": { + "pod": "podsandbox1" + }, + "readonly_rootfs": false, + "log_path": "", + "stdin": false, + "stdin_once": false, + "tty": false, + "linux": { + "resources": { + "memory_limit_in_bytes": 209715200, + "cpu_period": 10000, + "cpu_quota": 20000, + "cpu_shares": 512, + "oom_score_adj": 30 + }, + "security_context": { + "capabilities": { + "add_capabilities": [ + "sys_admin" + ] + } + } + } +} diff --git a/test/testdata/sandbox1_config.json b/test/testdata/sandbox1_config.json new file mode 100644 index 00000000..23b9a67e --- /dev/null +++ b/test/testdata/sandbox1_config.json @@ -0,0 +1,51 @@ +{ + "metadata": { + "name": "podsandbox1", + "uid": "redhat-test-crio-1", + "namespace": "redhat.test.crio", + "attempt": 1 + }, + "hostname": "crictl_host", + "log_directory": "", + "dns_config": { + "searches": [ + "8.8.8.8" + ] + }, + "port_mappings": [], + "resources": { + "cpu": { + "limits": 3, + "requests": 2 + }, + "memory": { + "limits": 50000000, + "requests": 
2000000 + } + }, + "labels": { + "name": "podsandbox1", + "group": "test", + "version": "v1.0.0" + }, + "annotations": { + "owner": "hmeng", + "security.alpha.kubernetes.io/seccomp/pod": "unconfined" + }, + "linux": { + "cgroup_parent": "/Burstable/pod_123-456", + "security_context": { + "namespace_options": { + "host_network": false, + "host_pid": false, + "host_ipc": false + }, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + } + } + } +} diff --git a/test/testdata/sandbox2_config.json b/test/testdata/sandbox2_config.json new file mode 100644 index 00000000..7f76e628 --- /dev/null +++ b/test/testdata/sandbox2_config.json @@ -0,0 +1,51 @@ +{ + "metadata": { + "name": "podsandbox2", + "uid": "redhat-test-crio-2", + "namespace": "redhat.test.crio", + "attempt": 1 + }, + "hostname": "crictl_host", + "log_directory": "", + "dns_config": { + "searches": [ + "8.8.8.8" + ] + }, + "port_mappings": [], + "resources": { + "cpu": { + "limits": 3, + "requests": 2 + }, + "memory": { + "limits": 50000000, + "requests": 2000000 + } + }, + "labels": { + "name": "podsandbox2", + "group": "test", + "version": "v1.0.0" + }, + "annotations": { + "owner": "hmeng", + "security.alpha.kubernetes.io/seccomp/pod": "unconfined" + }, + "linux": { + "cgroup_parent": "/Burstable/pod_123-456", + "security_context": { + "namespace_options": { + "host_network": false, + "host_pid": false, + "host_ipc": false + }, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + } + } + } +} diff --git a/test/testdata/sandbox3_config.json b/test/testdata/sandbox3_config.json new file mode 100644 index 00000000..8b15a422 --- /dev/null +++ b/test/testdata/sandbox3_config.json @@ -0,0 +1,51 @@ +{ + "metadata": { + "name": "podsandbox3", + "uid": "redhat-test-crio-3", + "namespace": "redhat.test.crio", + "attempt": 1 + }, + "hostname": "crictl_host", + "log_directory": "", + "dns_config": { + "searches": [ + "8.8.8.8" + ] + }, + "port_mappings": [], + "resources": { + "cpu": { + "limits": 3, + "requests": 2 + }, + "memory": { + "limits": 50000000, + "requests": 2000000 + } + }, + "labels": { + "name": "podsandbox3", + "group": "test", + "version": "v1.1.0" + }, + "annotations": { + "owner": "hmeng", + "security.alpha.kubernetes.io/seccomp/pod": "unconfined" + }, + "linux": { + "cgroup_parent": "/Burstable/pod_123-456", + "security_context": { + "namespace_options": { + "host_network": false, + "host_pid": false, + "host_ipc": false + }, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + } + } + } +} diff --git a/test/testdata/sandbox_config.json b/test/testdata/sandbox_config.json index 5f9e6871..c424f748 100644 --- a/test/testdata/sandbox_config.json +++ b/test/testdata/sandbox_config.json @@ -5,7 +5,7 @@ "namespace": "redhat.test.crio", "attempt": 1 }, - "hostname": "crioctl_host", + "hostname": "crictl_host", "log_directory": "", "dns_config": { "searches": [ @@ -28,8 +28,6 @@ }, "annotations": { "owner": "hmeng", - "security.alpha.kubernetes.io/sysctls": "kernel.shm_rmid_forced=1,net.ipv4.ip_local_port_range=1024 65000", - "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" , "security.alpha.kubernetes.io/seccomp/pod": "unconfined" }, "linux": { @@ -39,6 +37,12 @@ "host_network": false, "host_pid": false, "host_ipc": false + }, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": 
"svirt_lxc_net_t", + "level": "s0:c4,c5" } } } diff --git a/test/testdata/sandbox_config_hostnet.json b/test/testdata/sandbox_config_hostnet.json index 99a7560d..a035e56a 100644 --- a/test/testdata/sandbox_config_hostnet.json +++ b/test/testdata/sandbox_config_hostnet.json @@ -5,7 +5,7 @@ "namespace": "redhat.test.crio", "attempt": 1 }, - "hostname": "crioctl_host", + "hostname": "crictl_host", "log_directory": "", "dns_options": { "servers": [ @@ -32,7 +32,6 @@ }, "annotations": { "owner": "hmeng", - "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" , "security.alpha.kubernetes.io/seccomp/pod": "unconfined" }, "linux": { diff --git a/test/testdata/sandbox_config_hostport.json b/test/testdata/sandbox_config_hostport.json index 5feda866..0d39dcc4 100644 --- a/test/testdata/sandbox_config_hostport.json +++ b/test/testdata/sandbox_config_hostport.json @@ -5,7 +5,7 @@ "namespace": "redhat.test.crio", "attempt": 1 }, - "hostname": "crioctl_host", + "hostname": "crictl_host", "log_directory": "", "dns_options": { "servers": [ @@ -38,8 +38,6 @@ }, "annotations": { "owner": "hmeng", - "security.alpha.kubernetes.io/sysctls": "kernel.shm_rmid_forced=1,net.ipv4.ip_local_port_range=1024 65000", - "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" , "security.alpha.kubernetes.io/seccomp/pod": "unconfined" }, "linux": { diff --git a/test/testdata/sandbox_config_seccomp.json b/test/testdata/sandbox_config_seccomp.json index eb272406..702cfc3b 100644 --- a/test/testdata/sandbox_config_seccomp.json +++ b/test/testdata/sandbox_config_seccomp.json @@ -5,7 +5,7 @@ "namespace": "redhat.test.crio", "attempt": 1 }, - "hostname": "crioctl_host", + "hostname": "crictl_host", "log_directory": "", "dns_options": { "servers": [ @@ -32,22 +32,22 @@ }, "annotations": { "owner": "hmeng" - %VALUE% }, "linux": { "cgroup_parent": "/Burstable/pod_123-456", "security_context": { + "seccomp_profile_path": "%VALUE%", "namespace_options": { "host_network": false, "host_pid": false, "host_ipc": false + }, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" } - }, - "selinux_options": { - "user": "system_u", - "role": "system_r", - "type": "container_t", - "level": "s0:c1,c2" } } } diff --git a/test/testdata/sandbox_config_selinux.json b/test/testdata/sandbox_config_selinux.json new file mode 100644 index 00000000..57cecacd --- /dev/null +++ b/test/testdata/sandbox_config_selinux.json @@ -0,0 +1,46 @@ +{ + "metadata": { + "name": "podsandbox1", + "uid": "redhat-test-crio", + "namespace": "redhat.test.crio", + "attempt": 1 + }, + "hostname": "crictl_host", + "log_directory": "", + "dns_config": { + "searches": [ + "8.8.8.8" + ] + }, + "port_mappings": [], + "resources": { + "cpu": { + "limits": 3, + "requests": 2 + }, + "memory": { + "limits": 50000000, + "requests": 2000000 + } + }, + "labels": { + "group": "test" + }, + "annotations": { + "owner": "hmeng", + "security.alpha.kubernetes.io/seccomp/pod": "unconfined" + }, + "linux": { + "cgroup_parent": "/Burstable/pod_123-456", + "security_context": { + "namespace_options": { + "host_network": false, + "host_pid": false, + "host_ipc": false + }, + "selinux_options": { + "level": "s0" + } + } + } +} diff --git a/test/testdata/sandbox_config_sysctl.json b/test/testdata/sandbox_config_sysctl.json new file mode 100644 index 00000000..4146dc4f --- /dev/null +++ b/test/testdata/sandbox_config_sysctl.json @@ -0,0 +1,54 @@ +{ + "metadata": { + "name": "podsandbox1", + "uid": 
"redhat-test-crio", + "namespace": "redhat.test.crio", + "attempt": 1 + }, + "hostname": "crictl_host", + "log_directory": "", + "dns_config": { + "searches": [ + "8.8.8.8" + ] + }, + "port_mappings": [], + "resources": { + "cpu": { + "limits": 3, + "requests": 2 + }, + "memory": { + "limits": 50000000, + "requests": 2000000 + } + }, + "labels": { + "group": "test" + }, + "annotations": { + "owner": "hmeng", + "security.alpha.kubernetes.io/seccomp/pod": "unconfined" + }, + "linux": { + "sysctls": { + "kernel.shm_rmid_forced": "1", + "net.ipv4.ip_local_port_range": "1024 65000", + "kernel.msgmax": "8192" + }, + "cgroup_parent": "/Burstable/pod_123-456", + "security_context": { + "namespace_options": { + "host_network": false, + "host_pid": false, + "host_ipc": false + }, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + } + } + } +} diff --git a/test/testdata/template_container_config.json b/test/testdata/template_container_config.json new file mode 100644 index 00000000..a770a7c9 --- /dev/null +++ b/test/testdata/template_container_config.json @@ -0,0 +1,68 @@ +{ + "metadata": { + "name": "${NAME}", + "attempt": 1 + }, + "image": { + "image": "${IMAGE}" + }, + "command": ${COMMAND}, + "args": [], + "working_dir": "/", + "envs": [ + { + "key": "PATH", + "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + }, + { + "key": "TERM", + "value": "xterm" + }, + { + "key": "TESTDIR", + "value": "test/dir1" + }, + { + "key": "TESTFILE", + "value": "test/file1" + } + ], + "labels": { + "type": "small", + "batch": "no" + }, + "annotations": { + "owner": "dragon", + "daemon": "crio" + }, + "privileged": true, + "log_path": "", + "stdin": false, + "stdin_once": false, + "tty": false, + "linux": { + "resources": { + "cpu_period": 10000, + "cpu_quota": 20000, + "cpu_shares": 512, + "oom_score_adj": 30 + }, + "security_context": { + "readonly_rootfs": false, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + }, + "capabilities": { + "add_capabilities": [ + "setuid", + "setgid" + ], + "drop_capabilities": [ + ] + } + } + } +} diff --git a/test/testdata/template_sandbox_config.json b/test/testdata/template_sandbox_config.json new file mode 100644 index 00000000..c2f3f197 --- /dev/null +++ b/test/testdata/template_sandbox_config.json @@ -0,0 +1,49 @@ +{ + "metadata": { + "name": "${NAME}", + "uid": "${CUID}", + "namespace": "${NAMESPACE}", + "attempt": 1 + }, + "hostname": "crictl_host", + "log_directory": "", + "dns_config": { + "searches": [ + "8.8.8.8" + ] + }, + "port_mappings": [], + "resources": { + "cpu": { + "limits": 3, + "requests": 2 + }, + "memory": { + "limits": 50000000, + "requests": 2000000 + } + }, + "labels": { + "group": "test" + }, + "annotations": { + "owner": "hmeng", + "security.alpha.kubernetes.io/seccomp/pod": "unconfined" + }, + "linux": { + "cgroup_parent": "/Burstable/pod_123-456", + "security_context": { + "namespace_options": { + "host_network": false, + "host_pid": false, + "host_ipc": false + }, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + } + } + } +} diff --git a/transfer.md b/transfer.md index d5ad3950..8f6379b4 100644 --- a/transfer.md +++ b/transfer.md @@ -1,14 +1,14 @@ -# cri-o Usage Transfer +# CRI-O Usage Transfer -This document outlines useful information for ops and dev transfer as it relates to infrastructure that utilizes cri-o. 
+This document outlines useful information for ops and dev transfer as it relates to infrastructure that utilizes CRI-O. ## Operational Transfer ## Abstract -The `crio` daemon is intended to provide the [CRI](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md) socket needed for Kubernetes to use for automating deployment, scaling, and management of containerized applications (See the document for [configuring kubernetes to use cri-o](./kubernetes.md) for more information on that). -Therefore the `crioctl` command line is a client that interfaces to the same grpc socket as the kubernetes daemon would, for talking to the `crio` daemon. -In many ways `crioctl` is only as feature rich as the Kubernetes CRI requires. +The `crio` daemon is intended to provide the [CRI](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md) socket needed for Kubernetes to use for automating deployment, scaling, and management of containerized applications (see the document on [configuring Kubernetes to use CRI-O](./kubernetes.md) for more information). +The [crictl](https://github.com/kubernetes-incubator/cri-tools) command line is therefore a client that talks to the `crio` daemon over the same gRPC socket the kubelet uses. +In many ways [crictl](https://github.com/kubernetes-incubator/cri-tools) is only as feature-rich as the Kubernetes CRI requires. There are additional tools e.g. `kpod` and [`buildah`](https://github.com/projectatomic/buildah) that provide a feature rich set of commands for all operational needs in a Kubernetes environment. @@ -20,16 +20,16 @@ As well as some systemd helpers like `systemd-cgls` and `systemd-cgtop` are stil ## Equivalents For many troubleshooting and information collection steps, there may be an existing pattern. -Following provides equivalent with cri-o tools for gathering information or jumping into containers, for operational use. +The following table provides CRI-O equivalents for gathering information or jumping into containers, for operational use. | Existing Step | CRI-O (and friends) | | :---: | :---: | -| `docker exec` | [`crioctl ctr exec`](./docs/crio.8.md) | +| `docker exec` | [`crictl exec`](https://github.com/kubernetes-incubator/cri-tools/blob/master/docs/crictl.md) | | `docker info` | [`kpod info`](./docs/kpod-info.1.md) | | `docker inspect` | [`kpod inspect`](./docs/kpod-inspect.1.md) | | `docker logs` | [`kpod logs`](./docs/kpod-logs.1.md) | -| `docker ps` | [`crioctl ctr list`](./docs/crio.8.md) or [`runc list`](https://github.com/opencontainers/runc/blob/master/man/runc-list.8.md) | -| `docker stats` | [`kpod stats`](./docs/kpod-stats.1.md) or `crioctl ctr status`| +| `docker ps` | [`crictl ps`](https://github.com/kubernetes-incubator/cri-tools/blob/master/docs/crictl.md) or [`runc list`](https://github.com/opencontainers/runc/blob/master/man/runc-list.8.md) | +| `docker stats` | [`kpod stats`](./docs/kpod-stats.1.md) | If you were already using steps like `kubectl exec` (or `oc exec` on OpenShift), they will continue to function the same way.
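To make the table above concrete, here is a hedged sketch of a short troubleshooting session using the CRI-O equivalents; `$CTR_ID` is a placeholder for a real container ID, and exact flags may vary between crictl/kpod versions.

```
# docker ps      ->  crictl ps: list the CRI-O managed containers
sudo crictl ps

# docker exec    ->  crictl exec: run a command in a running container
# (--sync waits for the command and returns its output, as in the tests above)
sudo crictl exec --sync "$CTR_ID" cat /proc/1/cmdline

# docker logs    ->  kpod logs: show a container's log output
sudo kpod logs "$CTR_ID"

# docker inspect ->  kpod inspect: dump container metadata
sudo kpod inspect "$CTR_ID"
```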
@@ -39,18 +39,32 @@ There are other equivalents for these tools | Existing Step | CRI-O (and friends) | | :---: | :---: | +| `docker attach` | [`kpod exec`](./docs/kpod-attach.1.md) *** | | `docker build` | [`buildah bud`](https://github.com/projectatomic/buildah/blob/master/docs/buildah-bud.md) | -| `docker cp` | [`kpod mount`](./docs/kpod-cp.1.md) | +| `docker cp` | [`kpod mount`](./docs/kpod-cp.1.md) **** | +| `docker create` | [`kpod create`](./docs/kpod-create.1.md) | | `docker diff` | [`kpod diff`](./docs/kpod-diff.1.md) | | `docker export` | [`kpod export`](./docs/kpod-export.1.md) | | `docker history`| [`kpod history`](./docs/kpod-history.1.md)| | `docker images` | [`kpod images`](./docs/kpod-images.1.md) | +| `docker kill` | [`kpod kill`](./docs/kpod-kill.1.md) | | `docker load` | [`kpod load`](./docs/kpod-load.1.md) | +| `docker login` | [`kpod login`](./docs/kpod-login.1.md) | +| `docker logout` | [`kpod logout`](./docs/kpod-logout.1.md) | +| `docker pause` | [`kpod pause`](./docs/kpod-pause.1.md) | | `docker ps` | [`kpod ps`](./docs/kpod-ps.1.md) | | `docker pull` | [`kpod pull`](./docs/kpod-pull.1.md) | | `docker push` | [`kpod push`](./docs/kpod-push.1.md) | | `docker rename` | [`kpod rename`](./docs/kpod-rename.1.md) | +| `docker rm` | [`kpod rm`](./docs/kpod-rm.1.md) | | `docker rmi` | [`kpod rmi`](./docs/kpod-rmi.1.md) | +| `docker run` | [`kpod run`](./docs/kpod-run.1.md) | | `docker save` | [`kpod save`](./docs/kpod-save.1.md) | +| `docker stop` | [`kpod stop`](./docs/kpod-stop.1.md) | | `docker tag` | [`kpod tag`](./docs/kpod-tag.1.md) | +| `docker unpause`| [`kpod unpause`](./docs/kpod-unpause.1.md)| | `docker version`| [`kpod version`](./docs/kpod-version.1.md)| +| `docker wait` | [`kpod wait`](./docs/kpod-wait.1.md) | + +*** Use `kpod exec` to enter a container and `kpod logs` to view the output of PID 1 of a container. +**** Use `kpod mount` to take advantage of the entire Linux tool chain rather than just cp. Read [`here`](./docs/kpod-cp.1.md) for more information. diff --git a/tutorial.md b/tutorial.md index 94ba2672..5f89ccb8 100644 --- a/tutorial.md +++ b/tutorial.md @@ -1,10 +1,10 @@ -# cri-o Tutorial +# CRI-O Tutorial -This tutorial will walk you through the installation of [cri-o](https://github.com/kubernetes-incubator/cri-o), an Open Container Initiative-based implementation of [Kubernetes Container Runtime Interface](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/container-runtime-interface-v1.md), and the creation of [Redis](https://redis.io/) server running in a [Pod](http://kubernetes.io/docs/user-guide/pods/). +This tutorial will walk you through the installation of [CRI-O](https://github.com/kubernetes-incubator/cri-o), an Open Container Initiative-based implementation of the [Kubernetes Container Runtime Interface](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/container-runtime-interface-v1.md), and the creation of a [Redis](https://redis.io/) server running in a [Pod](http://kubernetes.io/docs/user-guide/pods/). ## Prerequisites -A Linux machine is required to download and build the `cri-o` components and run the commands in this tutorial. +A Linux machine is required to download and build the `CRI-O` components and run the commands in this tutorial. Create a machine running Ubuntu 16.10: @@ -26,7 +26,7 @@ gcloud compute ssh cri-o This section will walk you through installing the following components: * crio - The implementation of the Kubernetes CRI, which manages Pods.
-* crioctl - The crio client for testing. +* crictl - The CRI client for testing. * cni - The Container Network Interface * runc - The OCI runtime to launch the container @@ -36,17 +36,17 @@ This section will walk you through installing the following components: Download the `runc` release binary: ``` -wget https://github.com/opencontainers/runc/releases/download/v1.0.0-rc4/runc-linux-amd64 +wget https://github.com/opencontainers/runc/releases/download/v1.0.0-rc4/runc.amd64 ``` Set the executable bit and copy the `runc` binary into your PATH: ``` -chmod +x runc-linux-amd64 +chmod +x runc.amd64 ``` ``` -sudo mv runc-linux-amd64 /usr/bin/runc +sudo mv runc.amd64 /usr/bin/runc ``` Print the `runc` version: @@ -66,16 +66,16 @@ The `crio` project does not ship binary releases so you'll need to build it from #### Install the Go runtime and tool chain -Download the Go 1.7.4 binary release: +Download the Go 1.8.5 binary release: ``` -wget https://storage.googleapis.com/golang/go1.7.4.linux-amd64.tar.gz +wget https://storage.googleapis.com/golang/go1.8.5.linux-amd64.tar.gz ``` -Install Go 1.7.4: +Install Go 1.8.5: ``` -sudo tar -xvf go1.7.4.linux-amd64.tar.gz -C /usr/local/ +sudo tar -xvf go1.8.5.linux-amd64.tar.gz -C /usr/local/ ``` ``` @@ -90,20 +90,32 @@ export GOPATH=$HOME/go export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin ``` -At this point the Go 1.7.4 tool chain should be installed: +At this point the Go 1.8.5 tool chain should be installed: ``` go version ``` ``` -go version go1.7.4 linux/amd64 +go version go1.8.5 linux/amd64 +``` + +#### Get crictl + +``` +go get github.com/kubernetes-incubator/cri-tools/cmd/crictl ``` #### Build crio from source ``` -sudo apt-get install -y libglib2.0-dev libseccomp-dev libapparmor-dev +sudo apt-get update && sudo apt-get install -y libglib2.0-dev \ + libseccomp-dev \ + libapparmor-dev \ + libgpgme11-dev \ + libdevmapper-dev \ + make \ + git ``` ``` @@ -126,33 +138,10 @@ make sudo make install ``` -Output: +If you are installing for the first time, generate and install configuration files with: ``` -install -D -m 755 kpod /usr/local/bin/kpod -install -D -m 755 crio /usr/local/bin/crio -install -D -m 755 crioctl /usr/local/bin/crioctl -install -D -m 755 conmon/conmon /usr/local/libexec/crio/conmon -install -D -m 755 pause/pause /usr/local/libexec/crio/pause -install -d -m 755 /usr/local/share/man/man{1,5,8} -install -m 644 docs/kpod.1 docs/kpod-launch.1 -t /usr/local/share/man/man1 -install -m 644 docs/crio.conf.5 -t /usr/local/share/man/man5 -install -m 644 docs/crio.8 -t /usr/local/share/man/man8 -install -D -m 644 crio.conf /etc/crio/crio.conf -install -D -m 644 seccomp.json /etc/crio/seccomp.json -``` - -If you are installing for the first time, generate config as follows: - -``` -make install.config -``` - -Output: - -``` -install -D -m 644 crio.conf /etc/crio/crio.conf -install -D -m 644 seccomp.json /etc/crio/seccomp.json +sudo make install.config ``` #### Start the crio system daemon ``` Description=OCI-based implementation of Kubernetes Container Runtime Interface Documentation=https://github.com/kubernetes-incubator/cri-o [Service] -ExecStart=/usr/local/bin/crio --debug +ExecStart=/usr/local/bin/crio --log-level debug Restart=on-failure RestartSec=5 @@ -184,12 +173,19 @@ sudo systemctl start crio #### Ensure the crio service is running ``` -sudo crioctl runtimeversion +sudo crictl --runtime-endpoint /var/run/crio/crio.sock info ``` ``` -VersionResponse: Version: 0.1.0, RuntimeName: runc, RuntimeVersion: 1.0.0-rc4, RuntimeApiVersion:
+Version: 0.1.0
+RuntimeName: cri-o
+RuntimeVersion: 1.9.0-dev
+RuntimeApiVersion: v1alpha1
```
+> To avoid passing `--runtime-endpoint` on every `crictl` invocation, you can
+> export `CRI_RUNTIME_ENDPOINT=/var/run/crio/crio.sock`,
+> or copy `crictl.yaml` from this repo to `/etc/crictl.yaml`.
+
### CNI plugins
This tutorial will use the latest version of `CNI` plugins from the master branch and build it from source.
@@ -273,11 +269,25 @@ sudo sh -c 'cat >/etc/cni/net.d/99-loopback.conf <<-EOF
EOF'
```
+Install the `skopeo-containers` package from `ppa:projectatomic/ppa`:
+
+```
+sudo add-apt-repository ppa:projectatomic/ppa
+sudo apt-get update
+sudo apt-get install skopeo-containers -y
+```
+
+Restart crio to apply the new CNI configuration:
+
+```
+sudo systemctl restart crio
+```
+
At this point `CNI` is installed and configured to allocate IP addresses to containers from the `10.88.0.0/16` subnet.
## Pod Tutorial
-Now that the `cri-o` components have been installed and configured we are ready to create a Pod. This section will walk you through launching a Redis server in a Pod. Once the Redis server is running we'll use telnet to verify it's working, then we'll stop the Redis server and clean up the Pod.
+Now that the `CRI-O` components have been installed and configured, we are ready to create a Pod. This section will walk you through launching a Redis server in a Pod. Once the Redis server is running we'll use telnet to verify it's working, then we'll stop the Redis server and clean up the Pod.
### Creating a Pod
@@ -290,15 +300,15 @@ cd $GOPATH/src/github.com/kubernetes-incubator/cri-o
Next create the Pod and capture the Pod ID for later use:
```
-POD_ID=$(sudo crioctl pod run --config test/testdata/sandbox_config.json)
+POD_ID=$(sudo crictl runp test/testdata/sandbox_config.json)
```
-> sudo crioctl pod run --config test/testdata/sandbox_config.json
+> sudo crictl runp test/testdata/sandbox_config.json
-Use the `crioctl` command to get the status of the Pod:
+Use the `crictl` command to get the status of the Pod:
```
-sudo crioctl pod status --id $POD_ID
+sudo crictl inspectp --output table $POD_ID
```
Output:
@@ -324,27 +334,27 @@ Annotations:
### Create a Redis container inside the Pod
-Use the `crioctl` command to pull the redis image, create a redis container from a container configuration and attach it to the Pod created earlier:
+Use the `crictl` command to pull the redis image, create a redis container from a container configuration, and attach it to the Pod created earlier:
```
-sudo crioctl image pull redis:alpine
-CONTAINER_ID=$(sudo crioctl ctr create --pod $POD_ID --config test/testdata/container_redis.json)
+sudo crictl pull redis:alpine
+CONTAINER_ID=$(sudo crictl create $POD_ID test/testdata/container_redis.json test/testdata/sandbox_config.json)
```
-> sudo crioctl ctr create --pod $POD_ID --config test/testdata/container_redis.json
+> sudo crictl create $POD_ID test/testdata/container_redis.json test/testdata/sandbox_config.json
-The `crioctl ctr create` command will take a few seconds to return because the redis container needs to be pulled.
+The `crictl create` command will take a few seconds to return because the redis image needs to be pulled.
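Optionally, verify that the redis image is now in CRI-O's local storage before starting the container. This is just a sanity check, and it assumes your `crictl` build includes the `images` subcommand:

```
sudo crictl images
```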
Start the Redis container:
```
-sudo crioctl ctr start --id $CONTAINER_ID
+sudo crictl start $CONTAINER_ID
```
Get the status for the Redis container:
```
-sudo crioctl ctr status --id $CONTAINER_ID
+sudo crictl inspect $CONTAINER_ID
```
Output:
@@ -401,25 +411,25 @@ sudo journalctl -u crio --no-pager
### Stop the redis container and delete the Pod
```
-sudo crioctl ctr stop --id $CONTAINER_ID
+sudo crictl stop $CONTAINER_ID
```
```
-sudo crioctl ctr remove --id $CONTAINER_ID
+sudo crictl rm $CONTAINER_ID
```
```
-sudo crioctl pod stop --id $POD_ID
+sudo crictl stopp $POD_ID
```
```
-sudo crioctl pod remove --id $POD_ID
+sudo crictl rmp $POD_ID
```
```
-sudo crioctl pod list
+sudo crictl pods
```
```
-sudo crioctl ctr list
+sudo crictl ps
```
diff --git a/types/types.go b/types/types.go
new file mode 100644
index 00000000..cedc3abd
--- /dev/null
+++ b/types/types.go
@@ -0,0 +1,24 @@
+package types
+
+// ContainerInfo stores information about containers
+type ContainerInfo struct {
+	Name string `json:"name"`
+	Pid int `json:"pid"`
+	Image string `json:"image"`
+	ImageRef string `json:"image_ref"`
+	CreatedTime int64 `json:"created_time"`
+	Labels map[string]string `json:"labels"`
+	Annotations map[string]string `json:"annotations"`
+	CrioAnnotations map[string]string `json:"crio_annotations"`
+	LogPath string `json:"log_path"`
+	Root string `json:"root"`
+	Sandbox string `json:"sandbox"`
+	IP string `json:"ip_address"`
+}
+
+// CrioInfo stores information about the crio daemon
+type CrioInfo struct {
+	StorageDriver string `json:"storage_driver"`
+	StorageRoot string `json:"storage_root"`
+	CgroupDriver string `json:"cgroup_driver"`
+}
diff --git a/vendor.conf b/vendor.conf
index 81b0b766..acd066aa 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -1,19 +1,26 @@
-k8s.io/kubernetes v1.7.5 https://github.com/kubernetes/kubernetes
-# https://github.com/kubernetes/client-go#compatibility-matrix
-k8s.io/client-go v4.0.0 https://github.com/kubernetes/client-go
-k8s.io/apimachinery release-1.7 https://github.com/kubernetes/apimachinery
-k8s.io/apiserver release-1.7 https://github.com/kubernetes/apiserver
+k8s.io/kubernetes a48f11c2257d84b0bec89864025508b0ef626b4f https://github.com/kubernetes/kubernetes
+k8s.io/client-go master https://github.com/kubernetes/client-go
+k8s.io/apimachinery master https://github.com/kubernetes/apimachinery
+k8s.io/apiserver master https://github.com/kubernetes/apiserver
+k8s.io/utils 4fe312863be2155a7b68acd2aff1c9221b24e68c https://github.com/kubernetes/utils
+k8s.io/api master https://github.com/kubernetes/api
+k8s.io/kube-openapi 39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1 https://github.com/kubernetes/kube-openapi
+k8s.io/apiextensions-apiserver master https://github.com/kubernetes/apiextensions-apiserver
#
+github.com/googleapis/gnostic 0c5108395e2debce0d731cf0287ddf7242066aba
+github.com/gregjones/httpcache 787624de3eb7bd915c329cba748687a3b22666a6
+github.com/json-iterator/go 1.0.0
+github.com/peterbourgon/diskv v2.0.1
github.com/sirupsen/logrus v1.0.0
-github.com/containers/image abb4cd79e3427bb2b02a5930814ef2ad19983c24
+github.com/containers/image 3d0304a02154dddc8f97cc833aa0861cea5e9ade
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/ostreedev/ostree-go master
-github.com/containers/storage f8cff0727cf0802f0752ca58d2c05ec5270a47d5
+github.com/containers/storage 0d32dfce498e06c132c60dac945081bf44c22464
github.com/containernetworking/cni v0.4.0
google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
-github.com/opencontainers/selinux v1.0.0-rc1 +github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd github.com/opencontainers/go-digest v1.0.0-rc0 -github.com/opencontainers/runtime-tools d3f7e9e9e631c7e87552d67dc7c86de33c3fb68a +github.com/opencontainers/runtime-tools 625e2322645b151a7cbb93a8b42920933e72167f github.com/opencontainers/runc 45bde006ca8c90e089894508708bcf0e2cdf9e13 github.com/mrunalp/fileutils master github.com/vishvananda/netlink master @@ -57,11 +64,12 @@ github.com/ugorji/go d23841a297e5489e787e72fceffabf9d2994b52a github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7 golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 golang.org/x/net c427ad74c6d7a814201695e9ffde0c5d400a7674 -golang.org/x/sys 4cd6d1a821c7175768725b55ca82f14683a29ea4 +golang.org/x/sys 9aade4d3a3b7e6d876cd3823ad20ec45fc035402 golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 github.com/kr/pty v1.0.0 -github.com/gogo/protobuf v0.3 -github.com/golang/protobuf 748d386b5c1ea99658fd69fe9f03991ce86a90c1 +github.com/google/btree 7d79101e329e5a3adf994758c578dab82b90c017 +github.com/gogo/protobuf c0656edd0d9eab7c66d1eb0c568f9039345796f7 +github.com/golang/protobuf 4bd1920723d7b7c925de087aa32e2187708897f7 github.com/coreos/go-systemd v14 github.com/coreos/pkg v3 github.com/golang/groupcache b710c8433bd175204919eb38776e944233235d03 @@ -72,9 +80,9 @@ github.com/Microsoft/hcsshim 43f9725307998e09f2e3816c2c0c36dc98f0c982 github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46 github.com/emicklei/go-restful-swagger12 1.0.1 github.com/pkg/errors v0.8.0 -github.com/godbus/dbus v4.0.0 -github.com/urfave/cli v1.19.1 -github.com/vbatts/tar-split v0.10.1 +github.com/godbus/dbus a389bdde4dd695d414e47b755e95e72b7826432c +github.com/urfave/cli v1.20.0 +github.com/vbatts/tar-split v0.10.2 github.com/renstrom/dedent v1.0.0 github.com/hpcloud/tail v1.0.0 gopkg.in/fsnotify.v1 v1.4.2 @@ -102,3 +110,9 @@ github.com/go-zoo/bone 031b4005dfe248ccba241a0c9de0f9e112fd6b7c github.com/soheilhy/cmux v0.1.3 github.com/hashicorp/go-multierror 83588e72410abfbe4df460eeb6f30841ae47d4c4 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 +github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac +github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 +github.com/pmezard/go-difflib v1.0.0 +github.com/xeipuuv/gojsonreference master +github.com/xeipuuv/gojsonschema master +github.com/xeipuuv/gojsonpointer master diff --git a/vendor/github.com/buger/goterm/README.md b/vendor/github.com/buger/goterm/README.md deleted file mode 100644 index 536b7b88..00000000 --- a/vendor/github.com/buger/goterm/README.md +++ /dev/null @@ -1,119 +0,0 @@ -## Description - -This library provides basic building blocks for building advanced console UIs. - -Initially created for [Gor](http://github.com/buger/gor). - -Full API documentation: http://godoc.org/github.com/buger/goterm - -## Basic usage - -Full screen console app, printing current time: - -```go -import ( - tm "github.com/buger/goterm" - "time" -) - -func main() { - tm.Clear() // Clear current screen - - for { - // By moving cursor to top-left position we ensure that console output - // will be overwritten each time, instead of adding new. - tm.MoveCursor(1,1) - - tm.Println("Current Time:", time.Now().Format(time.RFC1123)) - - tm.Flush() // Call it every time at the end of rendering - - time.Sleep(time.Second) - } -} -``` - -This can be seen in [examples/time_example.go](examples/time_example.go). 
To -run it yourself, go into your `$GOPATH/src/github.com/buger/goterm` directory -and run `go run ./examples/time_example.go` - - -Print red bold message on white background: - -```go -tm.Println(tm.Background(tm.Color(tm.Bold("Important header"), tm.RED), tm.WHITE)) -``` - - -Create box and move it to center of the screen: - -```go -tm.Clear() - -// Create Box with 30% width of current screen, and height of 20 lines -box := tm.NewBox(30|tm.PCT, 20, 0) - -// Add some content to the box -// Note that you can add ANY content, even tables -fmt.Fprint(box, "Some box content") - -// Move Box to approx center of the screen -tm.Print(tm.MoveTo(box.String(), 40|tm.PCT, 40|tm.PCT)) - -tm.Flush() -``` - -This can be found in [examples/box_example.go](examples/box_example.go). - -Draw table: - -```go -// Based on http://golang.org/pkg/text/tabwriter -totals := tm.NewTable(0, 10, 5, ' ', 0) -fmt.Fprintf(totals, "Time\tStarted\tActive\tFinished\n") -fmt.Fprintf(totals, "%s\t%d\t%d\t%d\n", "All", started, started-finished, finished) -tm.Println(totals) -tm.Flush() -``` - -This can be found in [examples/table_example.go](examples/table_example.go). - -## Line charts - -Chart example: - -![screen shot 2013-07-09 at 5 05 37 pm](https://f.cloud.github.com/assets/14009/767676/e3dd35aa-e887-11e2-9cd2-f6451eb26adc.png) - - -```go - import ( - tm "github.com/buger/goterm" - ) - - chart := tm.NewLineChart(100, 20) - - data := new(tm.DataTable) - data.addColumn("Time") - data.addColumn("Sin(x)") - data.addColumn("Cos(x+1)") - - for i := 0.1; i < 10; i += 0.1 { - data.addRow(i, math.Sin(i), math.Cos(i+1)) - } - - tm.Println(chart.Draw(data)) -``` - -This can be found in [examples/chart_example.go](examples/chart_example.go). - -Drawing 2 separate graphs in different scales. Each graph have its own Y axe. - -```go -chart.Flags = tm.DRAW_INDEPENDENT -``` - -Drawing graph with relative scale (Grapwh draw starting from min value instead of zero) - -```go -chart.Flags = tm.DRAW_RELATIVE -``` diff --git a/vendor/github.com/buger/goterm/box.go b/vendor/github.com/buger/goterm/box.go deleted file mode 100644 index 7df929d7..00000000 --- a/vendor/github.com/buger/goterm/box.go +++ /dev/null @@ -1,122 +0,0 @@ -package goterm - -import ( - "bytes" - "strings" -) - -const DEFAULT_BORDER = "- │ ┌ ┐ └ ┘" - -// Box allows you to create independent parts of screen, with its own buffer and borders. -// Can be used for creating modal windows -// -// Generates boxes likes this: -// ┌--------┐ -// │hello │ -// │world │ -// │ │ -// └--------┘ -// -type Box struct { - Buf *bytes.Buffer - - Width int - Height int - - // To get even padding: PaddingX ~= PaddingY*4 - PaddingX int - PaddingY int - - // Should contain 6 border pieces separated by spaces - // - // Example border: - // "- │ ┌ ┐ └ ┘" - Border string - - Flags int // Not used now -} - -// Create new Box. 
-// Width and height can be relative: -// -// // Create box with 50% with of current screen and 10 lines height -// box := tm.NewBox(50|tm.PCT, 10, 0) -// -func NewBox(width, height int, flags int) *Box { - width, height = GetXY(width, height) - - box := new(Box) - box.Buf = new(bytes.Buffer) - box.Width = width - box.Height = height - box.Border = DEFAULT_BORDER - box.PaddingX = 1 - box.PaddingY = 0 - box.Flags = flags - - return box -} - -func (b *Box) Write(p []byte) (int, error) { - return b.Buf.Write(p) -} - -// Render Box -func (b *Box) String() (out string) { - borders := strings.Split(b.Border, " ") - lines := strings.Split(b.Buf.String(), "\n") - - // Border + padding - prefix := borders[1] + strings.Repeat(" ", b.PaddingX) - suffix := strings.Repeat(" ", b.PaddingX) + borders[1] - - offset := b.PaddingY + 1 // 1 is border width - - // Content width without borders and padding - contentWidth := b.Width - (b.PaddingX+1)*2 - - for y := 0; y < b.Height; y++ { - var line string - - switch { - // Draw borders for first line - case y == 0: - line = borders[2] + strings.Repeat(borders[0], b.Width-2) + borders[3] - - // Draw borders for last line - case y == (b.Height - 1): - line = borders[4] + strings.Repeat(borders[0], b.Width-2) + borders[5] - - // Draw top and bottom padding - case y <= b.PaddingY || y >= (b.Height-b.PaddingY): - line = borders[1] + strings.Repeat(" ", b.Width-2) + borders[1] - - // Render content - default: - if len(lines) > y-offset { - line = lines[y-offset] - } else { - line = "" - } - - if len(line) > contentWidth-1 { - // If line is too large limit it - line = line[0:contentWidth] - } else { - // If line is too small enlarge it by adding spaces - line = line + strings.Repeat(" ", contentWidth-len(line)) - } - - line = prefix + line + suffix - } - - // Don't add newline for last element - if y != b.Height-1 { - line = line + "\n" - } - - out += line - } - - return out -} diff --git a/vendor/github.com/buger/goterm/plot.go b/vendor/github.com/buger/goterm/plot.go deleted file mode 100644 index 77b9fb09..00000000 --- a/vendor/github.com/buger/goterm/plot.go +++ /dev/null @@ -1,328 +0,0 @@ -package goterm - -import ( - "fmt" - "math" - "strings" -) - -const ( - AXIS_LEFT = iota - AXIS_RIGHT -) - -const ( - DRAW_INDEPENDENT = 1 << iota - DRAW_RELATIVE -) - -type DataTable struct { - columns []string - - rows [][]float64 -} - -func (d *DataTable) AddColumn(name string) { - d.columns = append(d.columns, name) -} - -func (d *DataTable) AddRow(elms ...float64) { - d.rows = append(d.rows, elms) -} - -type Chart interface { - Draw(data DataTable, flags int) string -} - -type LineChart struct { - Buf []string - chartBuf []string - - data *DataTable - - Width int - Height int - - chartHeight int - chartWidth int - - paddingX int - - paddingY int - - Flags int -} - -func genBuf(size int) []string { - buf := make([]string, size) - - for i := 0; i < size; i++ { - buf[i] = " " - } - - return buf -} - -// Format float -func ff(num interface{}) string { - return fmt.Sprintf("%.1f", num) -} - -func NewLineChart(width, height int) *LineChart { - chart := new(LineChart) - chart.Width = width - chart.Height = height - chart.Buf = genBuf(width * height) - - // axis lines + axies text - chart.paddingY = 2 - - return chart -} - -func (c *LineChart) DrawAxes(maxX, minX, maxY, minY float64, index int) { - side := AXIS_LEFT - - if c.Flags&DRAW_INDEPENDENT != 0 { - if index%2 == 0 { - side = AXIS_RIGHT - } - - c.DrawLine(c.paddingX-1, 1, c.Width-c.paddingX, 1, "-") - } else { - 
c.DrawLine(c.paddingX-1, 1, c.Width-1, 1, "-") - } - - if side == AXIS_LEFT { - c.DrawLine(c.paddingX-1, 1, c.paddingX-1, c.Height-1, "│") - } else { - c.DrawLine(c.Width-c.paddingX, 1, c.Width-c.paddingX, c.Height-1, "│") - } - - left := 0 - if side == AXIS_RIGHT { - left = c.Width - c.paddingX + 1 - } - - if c.Flags&DRAW_RELATIVE != 0 { - c.writeText(ff(minY), left, 1) - } else { - if minY > 0 { - c.writeText("0", left, 1) - } else { - c.writeText(ff(minY), left, 1) - } - } - - c.writeText(ff(maxY), left, c.Height-1) - - c.writeText(ff(minX), c.paddingX, 0) - - x_col := c.data.columns[0] - c.writeText(c.data.columns[0], c.Width/2-len(x_col)/2, 1) - - if c.Flags&DRAW_INDEPENDENT != 0 || len(c.data.columns) < 3 { - col := c.data.columns[index] - - for idx, char := range strings.Split(col, "") { - start_from := c.Height/2 + len(col)/2 - idx - - if side == AXIS_LEFT { - c.writeText(char, c.paddingX-1, start_from) - } else { - c.writeText(char, c.Width-c.paddingX, start_from) - } - } - } - - if c.Flags&DRAW_INDEPENDENT != 0 { - c.writeText(ff(maxX), c.Width-c.paddingX-len(ff(maxX)), 0) - } else { - c.writeText(ff(maxX), c.Width-len(ff(maxX)), 0) - } -} - -func (c *LineChart) writeText(text string, x, y int) { - coord := y*c.Width + x - - for idx, char := range strings.Split(text, "") { - c.Buf[coord+idx] = char - } -} - -func (c *LineChart) Draw(data *DataTable) (out string) { - var scaleY, scaleX float64 - - c.data = data - - if c.Flags&DRAW_INDEPENDENT != 0 && len(data.columns) > 3 { - fmt.Println("Error: Can't use DRAW_INDEPENDENT for more then 2 graphs") - return "" - } - - charts := len(data.columns) - 1 - - prevPoint := [2]int{-1, -1} - - maxX, minX, maxY, minY := getBoundaryValues(data, -1) - - c.paddingX = int(math.Max(float64(len(ff(minY))), float64(len(ff(maxY))))) + 1 - - c.chartHeight = c.Height - c.paddingY - - if c.Flags&DRAW_INDEPENDENT != 0 { - c.chartWidth = c.Width - 2*c.paddingX - } else { - c.chartWidth = c.Width - c.paddingX - 1 - } - - scaleX = float64(c.chartWidth) / (maxX - minX) - - if c.Flags&DRAW_RELATIVE != 0 || minY < 0 { - scaleY = float64(c.chartHeight) / (maxY - minY) - } else { - scaleY = float64(c.chartHeight) / maxY - } - - for i := 1; i < charts+1; i++ { - if c.Flags&DRAW_INDEPENDENT != 0 { - maxX, minX, maxY, minY = getBoundaryValues(data, i) - - scaleX = float64(c.chartWidth-1) / (maxX - minX) - scaleY = float64(c.chartHeight) / maxY - - if c.Flags&DRAW_RELATIVE != 0 || minY < 0 { - scaleY = float64(c.chartHeight) / (maxY - minY) - } - } - - symbol := Color("•", i) - - c_data := getChartData(data, i) - - for _, point := range c_data { - x := int((point[0]-minX)*scaleX) + c.paddingX - y := int((point[1])*scaleY) + c.paddingY - - if c.Flags&DRAW_RELATIVE != 0 || minY < 0 { - y = int((point[1]-minY)*scaleY) + c.paddingY - } - - if prevPoint[0] == -1 { - prevPoint[0] = x - prevPoint[1] = y - } - - if prevPoint[0] <= x { - c.DrawLine(prevPoint[0], prevPoint[1], x, y, symbol) - } - - prevPoint[0] = x - prevPoint[1] = y - } - - c.DrawAxes(maxX, minX, maxY, minY, i) - } - - for row := c.Height - 1; row >= 0; row-- { - out += strings.Join(c.Buf[row*c.Width:(row+1)*c.Width], "") + "\n" - } - - return -} - -func (c *LineChart) DrawLine(x0, y0, x1, y1 int, symbol string) { - drawLine(x0, y0, x1, y1, func(x, y int) { - coord := y*c.Width + x - - if coord > 0 && coord < len(c.Buf) { - c.Buf[coord] = symbol - } - }) -} - -func getBoundaryValues(data *DataTable, index int) (maxX, minX, maxY, minY float64) { - maxX = data.rows[0][0] - minX = data.rows[0][0] - maxY = 
data.rows[0][1] - minY = data.rows[0][1] - - for _, r := range data.rows { - maxX = math.Max(maxX, r[0]) - minX = math.Min(minX, r[0]) - - for idx, c := range r { - if idx > 0 { - if index == -1 || index == idx { - maxY = math.Max(maxY, c) - minY = math.Min(minY, c) - } - } - } - } - - if maxY > 0 { - maxY = maxY * 1.1 - } else { - maxY = maxY * 0.9 - } - - if minY > 0 { - minY = minY * 0.9 - } else { - minY = minY * 1.1 - } - - return -} - -// DataTable can contain data for multiple graphs, we need to extract only 1 -func getChartData(data *DataTable, index int) (out [][]float64) { - for _, r := range data.rows { - out = append(out, []float64{r[0], r[index]}) - } - - return -} - -// Algorithm for drawing line between two points -// -// http://en.wikipedia.org/wiki/Bresenham's_line_algorithm -func drawLine(x0, y0, x1, y1 int, plot func(int, int)) { - dx := x1 - x0 - if dx < 0 { - dx = -dx - } - dy := y1 - y0 - if dy < 0 { - dy = -dy - } - var sx, sy int - if x0 < x1 { - sx = 1 - } else { - sx = -1 - } - if y0 < y1 { - sy = 1 - } else { - sy = -1 - } - err := dx - dy - - for { - plot(x0, y0) - if x0 == x1 && y0 == y1 { - break - } - e2 := 2 * err - if e2 > -dy { - err -= dy - x0 += sx - } - if e2 < dx { - err += dx - y0 += sy - } - } -} diff --git a/vendor/github.com/buger/goterm/table.go b/vendor/github.com/buger/goterm/table.go deleted file mode 100644 index d8dae55c..00000000 --- a/vendor/github.com/buger/goterm/table.go +++ /dev/null @@ -1,34 +0,0 @@ -package goterm - -import ( - "bytes" - "text/tabwriter" -) - -// Tabwriter with own buffer: -// -// totals := tm.NewTable(0, 10, 5, ' ', 0) -// fmt.Fprintf(totals, "Time\tStarted\tActive\tFinished\n") -// fmt.Fprintf(totals, "%s\t%d\t%d\t%d\n", "All", started, started-finished, finished) -// tm.Println(totals) -// -// Based on http://golang.org/pkg/text/tabwriter -type Table struct { - tabwriter.Writer - - Buf *bytes.Buffer -} - -// Same as here http://golang.org/pkg/text/tabwriter/#Writer.Init -func NewTable(minwidth, tabwidth, padding int, padchar byte, flags uint) *Table { - tbl := new(Table) - tbl.Buf = new(bytes.Buffer) - tbl.Init(tbl.Buf, minwidth, tabwidth, padding, padchar, flags) - - return tbl -} - -func (t *Table) String() string { - t.Flush() - return t.Buf.String() -} diff --git a/vendor/github.com/buger/goterm/terminal.go b/vendor/github.com/buger/goterm/terminal.go deleted file mode 100644 index 6b45c78b..00000000 --- a/vendor/github.com/buger/goterm/terminal.go +++ /dev/null @@ -1,258 +0,0 @@ -// Provides basic bulding blocks for advanced console UI -// -// Coordinate system: -// -// 1/1---X----> -// | -// Y -// | -// v -// -// Documentation for ANSI codes: http://en.wikipedia.org/wiki/ANSI_escape_code#Colors -// -// Inspired by: http://www.darkcoding.net/software/pretty-command-line-console-output-on-unix-in-python-and-go-lang/ -package goterm - -import ( - "bufio" - "bytes" - "fmt" - "os" - "strings" -) - -// Reset all custom styles -const RESET = "\033[0m" - -// Reset to default color -const RESET_COLOR = "\033[32m" - -// Return curor to start of line and clean it -const RESET_LINE = "\r\033[K" - -// List of possible colors -const ( - BLACK = iota - RED - GREEN - YELLOW - BLUE - MAGENTA - CYAN - WHITE -) - -var Output *bufio.Writer = bufio.NewWriter(os.Stdout) - -func getColor(code int) string { - return fmt.Sprintf("\033[3%dm", code) -} - -func getBgColor(code int) string { - return fmt.Sprintf("\033[4%dm", code) -} - -// Set percent flag: num | PCT -// -// Check percent flag: num & PCT -// -// Reset percent flag: num & 
0xFF -const shift = uint(^uint(0)>>63) << 4 -const PCT = 0x8000 << shift - -type winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -// Global screen buffer -// Its not recommented write to buffer dirrectly, use package Print,Printf,Println fucntions instead. -var Screen *bytes.Buffer = new(bytes.Buffer) - -// Get relative or absolute coorditantes -// To get relative, set PCT flag to number: -// -// // Get 10% of total width to `x` and 20 to y -// x, y = tm.GetXY(10|tm.PCT, 20) -// -func GetXY(x int, y int) (int, int) { - if y == -1 { - y = CurrentHeight() + 1 - } - - if x&PCT != 0 { - x = int((x & 0xFF) * Width() / 100) - } - - if y&PCT != 0 { - y = int((y & 0xFF) * Height() / 100) - } - - return x, y -} - -type sf func(int, string) string - -// Apply given transformation func for each line in string -func applyTransform(str string, transform sf) (out string) { - out = "" - - for idx, line := range strings.Split(str, "\n") { - out += transform(idx, line) - } - - return -} - -// Clear screen -func Clear() { - Output.WriteString("\033[2J") -} - -// Move cursor to given position -func MoveCursor(x int, y int) { - fmt.Fprintf(Screen, "\033[%d;%dH", x, y) -} - -// Move cursor up relative the current position -func MoveCursorUp(bias int) { - fmt.Fprintf(Screen, "\033[%dA", bias) -} - -// Move cursor down relative the current position -func MoveCursorDown(bias int) { - fmt.Fprintf(Screen, "\033[%dB", bias) -} - -// Move cursor forward relative the current position -func MoveCursorForward(bias int) { - fmt.Fprintf(Screen, "\033[%dC", bias) -} - -// Move cursor backward relative the current position -func MoveCursorBackward(bias int) { - fmt.Fprintf(Screen, "\033[%dD", bias) -} - -// Move string to possition -func MoveTo(str string, x int, y int) (out string) { - x, y = GetXY(x, y) - - return applyTransform(str, func(idx int, line string) string { - return fmt.Sprintf("\033[%d;%dH%s", y+idx, x, line) - }) -} - -// Return carrier to start of line -func ResetLine(str string) (out string) { - return applyTransform(str, func(idx int, line string) string { - return fmt.Sprintf(RESET_LINE, line) - }) -} - -// Make bold -func Bold(str string) string { - return applyTransform(str, func(idx int, line string) string { - return fmt.Sprintf("\033[1m%s\033[0m", line) - }) -} - -// Apply given color to string: -// -// tm.Color("RED STRING", tm.RED) -// -func Color(str string, color int) string { - return applyTransform(str, func(idx int, line string) string { - return fmt.Sprintf("%s%s%s", getColor(color), line, RESET) - }) -} - -func Highlight(str, substr string, color int) string { - hiSubstr := Color(substr, color) - return strings.Replace(str, substr, hiSubstr, -1) -} - -func HighlightRegion(str string, from, to, color int) string { - return str[:from] + Color(str[from:to], color) + str[to:] -} - -// Change background color of string: -// -// tm.Background("string", tm.RED) -// -func Background(str string, color int) string { - return applyTransform(str, func(idx int, line string) string { - return fmt.Sprintf("%s%s%s", getBgColor(color), line, RESET) - }) -} - -// Get console width -func Width() int { - ws, err := getWinsize() - - if err != nil { - return -1 - } - - return int(ws.Col) -} - -// Get console height -func Height() int { - ws, err := getWinsize() - if err != nil { - return -1 - } - return int(ws.Row) -} - -// Get current height. Line count in Screen buffer. 
-func CurrentHeight() int { - return strings.Count(Screen.String(), "\n") -} - -// Flush buffer and ensure that it will not overflow screen -func Flush() { - for idx, str := range strings.Split(Screen.String(), "\n") { - if idx > Height() { - return - } - - Output.WriteString(str + "\n") - } - - Output.Flush() - Screen.Reset() -} - -func Print(a ...interface{}) (n int, err error) { - return fmt.Fprint(Screen, a...) -} - -func Println(a ...interface{}) (n int, err error) { - return fmt.Fprintln(Screen, a...) -} - -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(Screen, format, a...) -} - -func Context(data string, idx, max int) string { - var start, end int - - if len(data[:idx]) < (max / 2) { - start = 0 - } else { - start = idx - max/2 - } - - if len(data)-idx < (max / 2) { - end = len(data) - 1 - } else { - end = idx + max/2 - } - - return data[start:end] -} diff --git a/vendor/github.com/buger/goterm/terminal_nosysioctl.go b/vendor/github.com/buger/goterm/terminal_nosysioctl.go deleted file mode 100644 index 69061500..00000000 --- a/vendor/github.com/buger/goterm/terminal_nosysioctl.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build windows plan9 solaris - -package goterm - -func getWinsize() (*winsize, error) { - ws := new(winsize) - - ws.Col = 80 - ws.Row = 24 - - return ws, nil -} diff --git a/vendor/github.com/buger/goterm/terminal_sysioctl.go b/vendor/github.com/buger/goterm/terminal_sysioctl.go deleted file mode 100644 index e98430fb..00000000 --- a/vendor/github.com/buger/goterm/terminal_sysioctl.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build !windows,!plan9,!solaris - -package goterm - -import ( - "fmt" - "os" - "runtime" - "syscall" - "unsafe" -) - -func getWinsize() (*winsize, error) { - ws := new(winsize) - - var _TIOCGWINSZ int64 - - switch runtime.GOOS { - case "linux": - _TIOCGWINSZ = 0x5413 - case "darwin": - _TIOCGWINSZ = 1074295912 - } - - r1, _, errno := syscall.Syscall(syscall.SYS_IOCTL, - uintptr(syscall.Stdin), - uintptr(_TIOCGWINSZ), - uintptr(unsafe.Pointer(ws)), - ) - - if int(r1) == -1 { - fmt.Println("Error:", os.NewSyscallError("GetWinsize", errno)) - return nil, os.NewSyscallError("GetWinsize", errno) - } - return ws, nil -} diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go index bb52ea76..29065e03 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/copy/copy.go @@ -12,8 +12,6 @@ import ( "strings" "time" - pb "gopkg.in/cheggaaa/pb.v1" - "github.com/containers/image/image" "github.com/containers/image/pkg/compression" "github.com/containers/image/signature" @@ -22,6 +20,7 @@ import ( "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" + pb "gopkg.in/cheggaaa/pb.v1" ) type digestingReader struct { @@ -31,23 +30,6 @@ type digestingReader struct { validationFailed bool } -// imageCopier allows us to keep track of diffID values for blobs, and other -// data, that we're copying between images, and cache other information that -// might allow us to take some shortcuts -type imageCopier struct { - copiedBlobs map[digest.Digest]digest.Digest - cachedDiffIDs map[digest.Digest]digest.Digest - manifestUpdates *types.ManifestUpdateOptions - dest types.ImageDestination - src types.Image - rawSource types.ImageSource - diffIDsAreNeeded bool - canModifyManifest bool - reportWriter io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties -} - // newDigestingReader 
returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error // and set validationFailed to true if the source stream does not match expectedDigest. func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { @@ -86,6 +68,27 @@ func (d *digestingReader) Read(p []byte) (int, error) { return n, err } +// copier allows us to keep track of diffID values for blobs, and other +// data shared across one or more images in a possible manifest list. +type copier struct { + copiedBlobs map[digest.Digest]digest.Digest + cachedDiffIDs map[digest.Digest]digest.Digest + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties +} + +// imageCopier tracks state specific to a single image (possibly an item of a manifest list) +type imageCopier struct { + c *copier + manifestUpdates *types.ManifestUpdateOptions + src types.Image + diffIDsAreNeeded bool + canModifyManifest bool +} + // Options allows supplying non-default configuration modifying the behavior of CopyImage. type Options struct { RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. @@ -95,6 +98,8 @@ type Options struct { DestinationCtx *types.SystemContext ProgressInterval time.Duration // time to wait between reports to signal the progress channel Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. + // manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type + ForceManifestMIMEType string } // Image copies image from srcRef to destRef, using policyContext to validate @@ -115,10 +120,6 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe reportWriter = options.ReportWriter } - writeReport := func(f string, a ...interface{}) { - fmt.Fprintf(reportWriter, f, a...) - } - dest, err := destRef.NewImageDestination(options.DestinationCtx) if err != nil { return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef)) @@ -129,49 +130,93 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe } }() - destSupportedManifestMIMETypes := dest.SupportedManifestMIMETypes() - - rawSource, err := srcRef.NewImageSource(options.SourceCtx, destSupportedManifestMIMETypes) + rawSource, err := srcRef.NewImageSource(options.SourceCtx) if err != nil { return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef)) } - unparsedImage := image.UnparsedFromSource(rawSource) defer func() { - if unparsedImage != nil { - if err := unparsedImage.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (unparsed: %v)", err) - } + if err := rawSource.Close(); err != nil { + retErr = errors.Wrapf(retErr, " (src: %v)", err) } }() + c := &copier{ + copiedBlobs: make(map[digest.Digest]digest.Digest), + cachedDiffIDs: make(map[digest.Digest]digest.Digest), + dest: dest, + rawSource: rawSource, + reportWriter: reportWriter, + progressInterval: options.ProgressInterval, + progress: options.Progress, + } + + unparsedToplevel := image.UnparsedInstance(rawSource, nil) + multiImage, err := isMultiImage(unparsedToplevel) + if err != nil { + return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef)) + } + + if !multiImage { + // The simple case: Just copy a single image. 
+ if err := c.copyOneImage(policyContext, options, unparsedToplevel); err != nil { + return err + } + } else { + // This is a manifest list. Choose a single image and copy it. + // FIXME: Copy to destinations which support manifest lists, one image at a time. + instanceDigest, err := image.ChooseManifestInstanceFromManifestList(options.SourceCtx, unparsedToplevel) + if err != nil { + return errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef)) + } + logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest) + unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) + + if err := c.copyOneImage(policyContext, options, unparsedInstance); err != nil { + return err + } + } + + if err := c.dest.Commit(); err != nil { + return errors.Wrap(err, "Error committing the finished image") + } + + return nil +} + +// Image copies a single (on-manifest-list) image unparsedImage, using policyContext to validate +// source image admissibility. +func (c *copier) copyOneImage(policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (retErr error) { + // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. + // Make sure we fail cleanly in such cases. + multiImage, err := isMultiImage(unparsedImage) + if err != nil { + // FIXME FIXME: How to name a reference for the sub-image? + return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference())) + } + if multiImage { + return fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + } + // Please keep this policy check BEFORE reading any other information about the image. + // (the multiImage check above only matches the MIME type, which we have received anyway. + // Actual parsing of anything should be deferred.) if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. 
return errors.Wrap(err, "Source image rejected") } - src, err := image.FromUnparsedImage(unparsedImage) + src, err := image.FromUnparsedImage(options.SourceCtx, unparsedImage) if err != nil { - return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef)) + return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference())) } - unparsedImage = nil - defer func() { - if err := src.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (source: %v)", err) - } - }() - if err := checkImageDestinationForCurrentRuntimeOS(src, dest); err != nil { + if err := checkImageDestinationForCurrentRuntimeOS(options.DestinationCtx, src, c.dest); err != nil { return err } - if src.IsMultiImage() { - return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef)) - } - var sigs [][]byte if options.RemoveSignatures { sigs = [][]byte{} } else { - writeReport("Getting image source signatures\n") + c.Printf("Getting image source signatures\n") s, err := src.Signatures(context.TODO()) if err != nil { return errors.Wrap(err, "Error reading signatures") @@ -179,41 +224,33 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe sigs = s } if len(sigs) != 0 { - writeReport("Checking if image destination supports signatures\n") - if err := dest.SupportsSignatures(); err != nil { + c.Printf("Checking if image destination supports signatures\n") + if err := c.dest.SupportsSignatures(); err != nil { return errors.Wrap(err, "Can not copy signatures") } } - canModifyManifest := len(sigs) == 0 - manifestUpdates := types.ManifestUpdateOptions{} - manifestUpdates.InformationOnly.Destination = dest + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // diffIDsAreNeeded is computed later + canModifyManifest: len(sigs) == 0, + } - if err := updateEmbeddedDockerReference(&manifestUpdates, dest, src, canModifyManifest); err != nil { + if err := ic.updateEmbeddedDockerReference(); err != nil { return err } // We compute preferredManifestMIMEType only to show it in error messages. // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. - preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, destSupportedManifestMIMETypes, canModifyManifest) + preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType) if err != nil { return err } - // If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here. - ic := imageCopier{ - copiedBlobs: make(map[digest.Digest]digest.Digest), - cachedDiffIDs: make(map[digest.Digest]digest.Digest), - manifestUpdates: &manifestUpdates, - dest: dest, - src: src, - rawSource: rawSource, - diffIDsAreNeeded: src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates), - canModifyManifest: canModifyManifest, - reportWriter: reportWriter, - progressInterval: options.ProgressInterval, - progress: options.Progress, - } + // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. 
+ ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) if err := ic.copyLayers(); err != nil { return err @@ -235,9 +272,9 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe } // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. // So if we are here, we will definitely be trying to convert the manifest. - // With !canModifyManifest, that would just be a string of repeated failures for the same reason, + // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. - if !canModifyManifest { + if !ic.canModifyManifest { return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") } @@ -245,7 +282,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} for _, manifestMIMEType := range otherManifestMIMETypeCandidates { logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) - manifestUpdates.ManifestMIMEType = manifestMIMEType + ic.manifestUpdates.ManifestMIMEType = manifestMIMEType attemptedManifest, err := ic.copyUpdatedConfigAndManifest() if err != nil { logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) @@ -264,35 +301,44 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe } if options.SignBy != "" { - newSig, err := createSignature(dest, manifest, options.SignBy, reportWriter) + newSig, err := c.createSignature(manifest, options.SignBy) if err != nil { return err } sigs = append(sigs, newSig) } - writeReport("Storing signatures\n") - if err := dest.PutSignatures(sigs); err != nil { + c.Printf("Storing signatures\n") + if err := c.dest.PutSignatures(sigs); err != nil { return errors.Wrap(err, "Error writing signatures") } - if err := dest.Commit(); err != nil { - return errors.Wrap(err, "Error committing the finished image") - } - return nil } -func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageDestination) error { +// Printf writes a formatted string to c.reportWriter. +// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...interface{}) { + fmt.Fprintf(c.reportWriter, format, a...) 
+} + +func checkImageDestinationForCurrentRuntimeOS(ctx *types.SystemContext, src types.Image, dest types.ImageDestination) error { if dest.MustMatchRuntimeOS() { + wantedOS := runtime.GOOS + if ctx != nil && ctx.OSChoice != "" { + wantedOS = ctx.OSChoice + } c, err := src.OCIConfig() if err != nil { return errors.Wrapf(err, "Error parsing image configuration") } - osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, runtime.GOOS) - if runtime.GOOS == "windows" && c.OS == "linux" { + osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS) + if wantedOS == "windows" && c.OS == "linux" { return osErr - } else if runtime.GOOS != "windows" && c.OS == "windows" { + } else if wantedOS != "windows" && c.OS == "windows" { return osErr } } @@ -300,35 +346,44 @@ func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageD } // updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. -func updateEmbeddedDockerReference(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, canModifyManifest bool) error { - destRef := dest.Reference().DockerReference() +func (ic *imageCopier) updateEmbeddedDockerReference() error { + destRef := ic.c.dest.Reference().DockerReference() if destRef == nil { return nil // Destination does not care about Docker references } - if !src.EmbeddedDockerReferenceConflicts(destRef) { + if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { return nil // No reference embedded in the manifest, or it matches destRef already. } - if !canModifyManifest { + if !ic.canModifyManifest { return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway", - transports.ImageName(dest.Reference()), destRef.String()) + transports.ImageName(ic.c.dest.Reference()), destRef.String()) } - manifestUpdates.EmbeddedDockerReference = destRef + ic.manifestUpdates.EmbeddedDockerReference = destRef return nil } -// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. func (ic *imageCopier) copyLayers() error { srcInfos := ic.src.LayerInfos() destInfos := []types.BlobInfo{} diffIDs := []digest.Digest{} + updatedSrcInfos := ic.src.LayerInfosForCopy() + srcInfosUpdated := false + if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { + if !ic.canModifyManifest { + return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden") + } + srcInfos = updatedSrcInfos + srcInfosUpdated = true + } for _, srcLayer := range srcInfos { var ( destInfo types.BlobInfo diffID digest.Digest err error ) - if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { // DiffIDs are, currently, needed only when converting from schema1. // In which case src.LayerInfos will not have URLs because schema1 // does not support them. 
@@ -336,7 +391,7 @@ func (ic *imageCopier) copyLayers() error { return errors.New("getting DiffID for foreign layers is unimplemented") } destInfo = srcLayer - fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name()) + ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name()) } else { destInfo, diffID, err = ic.copyLayer(srcLayer) if err != nil { @@ -350,7 +405,7 @@ func (ic *imageCopier) copyLayers() error { if ic.diffIDsAreNeeded { ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs } - if layerDigestsDiffer(srcInfos, destInfos) { + if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { ic.manifestUpdates.LayerInfos = destInfos } return nil @@ -381,7 +436,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) { // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. // So, this can only happen if we are trying to upload using one of the other MIME type candidates. // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise - // when ic.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. + // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) @@ -397,27 +452,27 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) { return nil, errors.Wrap(err, "Error reading manifest") } - if err := ic.copyConfig(pendingImage); err != nil { + if err := ic.c.copyConfig(pendingImage); err != nil { return nil, err } - fmt.Fprintf(ic.reportWriter, "Writing manifest to image destination\n") - if err := ic.dest.PutManifest(manifest); err != nil { + ic.c.Printf("Writing manifest to image destination\n") + if err := ic.c.dest.PutManifest(manifest); err != nil { return nil, errors.Wrap(err, "Error writing manifest") } return manifest, nil } // copyConfig copies config.json, if any, from src to dest. 
-func (ic *imageCopier) copyConfig(src types.Image) error { +func (c *copier) copyConfig(src types.Image) error { srcInfo := src.ConfigInfo() if srcInfo.Digest != "" { - fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest) + c.Printf("Copying config %s\n", srcInfo.Digest) configBlob, err := src.ConfigBlob() if err != nil { return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest) } - destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false) + destInfo, err := c.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false) if err != nil { return err } @@ -439,12 +494,12 @@ type diffIDResult struct { // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) { // Check if we already have a blob with this digest - haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo) + haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(srcInfo) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest) } // If we already have a cached diffID for this blob, we don't need to compute it - diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "") + diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "") // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again if haveBlob && !diffIDIsNeeded { // Check the blob sizes match, if we were given a size this time @@ -453,17 +508,17 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest } srcInfo.Size = extantBlobSize // Tell the image destination that this blob's delta is being applied again. 
For some image destinations, this can be faster than using GetBlob/PutBlob - blobinfo, err := ic.dest.ReapplyBlob(srcInfo) + blobinfo, err := ic.c.dest.ReapplyBlob(srcInfo) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest) } - fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest) - return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err + ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest) + return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err } // Fallback: copy the layer, computing the diffID if we need to do so - fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest) - srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo) + ic.c.Printf("Copying blob %s\n", srcInfo.Digest) + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(srcInfo) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) } @@ -481,7 +536,7 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID") } logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest + ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest } return blobInfo, diffIDResult.digest, nil } @@ -515,7 +570,7 @@ func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.Bl return pipeWriter } } - blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success + blobInfo, err := ic.c.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -549,7 +604,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) // perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, // perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied blob. -func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo, +func (c *copier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, canCompress bool) (types.BlobInfo, error) { // The copying happens through a pipeline of connected io.Readers. @@ -577,13 +632,13 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo // === Report progress using a pb.Reader. bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES) - bar.Output = ic.reportWriter + bar.Output = c.reportWriter bar.SetMaxWidth(80) bar.ShowTimeLeft = false bar.ShowPercent = false bar.Start() destStream = bar.NewProxyReader(destStream) - defer fmt.Fprint(ic.reportWriter, "\n") + defer bar.Finish() // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. 
@@ -594,7 +649,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo // === Compress the layer if it is uncompressed and compression is desired var inputInfo types.BlobInfo - if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() { + if !canCompress || isCompressed || !c.dest.ShouldCompressLayers() { logrus.Debugf("Using original blob without modification") inputInfo = srcInfo } else { @@ -611,19 +666,19 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo inputInfo.Size = -1 } - // === Report progress using the ic.progress channel, if required. - if ic.progress != nil && ic.progressInterval > 0 { + // === Report progress using the c.progress channel, if required. + if c.progress != nil && c.progressInterval > 0 { destStream = &progressReader{ source: destStream, - channel: ic.progress, - interval: ic.progressInterval, + channel: c.progress, + interval: c.progressInterval, artifact: srcInfo, lastTime: time.Now(), } } // === Finally, send the layer stream to dest. - uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo) + uploadedInfo, err := c.dest.PutBlob(destStream, inputInfo) if err != nil { return types.BlobInfo{}, errors.Wrap(err, "Error writing blob") } diff --git a/vendor/github.com/containers/image/copy/manifest.go b/vendor/github.com/containers/image/copy/manifest.go index e3b294dd..7e4cd10e 100644 --- a/vendor/github.com/containers/image/copy/manifest.go +++ b/vendor/github.com/containers/image/copy/manifest.go @@ -37,16 +37,20 @@ func (os *orderedSet) append(s string) { } } -// determineManifestConversion updates manifestUpdates to convert manifest to a supported MIME type, if necessary and canModifyManifest. -// Note that the conversion will only happen later, through src.UpdatedImage +// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. +// Note that the conversion will only happen later, through ic.src.UpdatedImage // Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), // and a list of other possible alternatives, in order. -func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, src types.Image, destSupportedManifestMIMETypes []string, canModifyManifest bool) (string, []string, error) { - _, srcType, err := src.Manifest() +func (ic *imageCopier) determineManifestConversion(destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) { + _, srcType, err := ic.src.Manifest() if err != nil { // This should have been cached?! return "", nil, errors.Wrap(err, "Error reading manifest") } + if forceManifestMIMEType != "" { + destSupportedManifestMIMETypes = []string{forceManifestMIMEType} + } + if len(destSupportedManifestMIMETypes) == 0 { return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. } @@ -67,10 +71,10 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s if _, ok := supportedByDest[srcType]; ok { prioritizedTypes.append(srcType) } - if !canModifyManifest { - // We could also drop the !canModifyManifest parameter and have the caller + if !ic.canModifyManifest { + // We could also drop the !ic.canModifyManifest check and have the caller // make the choice; it is already doing that to an extent, to improve error - // messages. But it is nice to hide the “if !canModifyManifest, do no conversion” + // messages. 
But it is nice to hide the “if !ic.canModifyManifest, do no conversion” // special case in here; the caller can then worry (or not) only about a good UI. logrus.Debugf("We can't modify the manifest, hoping for the best...") return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? @@ -94,9 +98,18 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s } preferredType := prioritizedTypes.list[0] if preferredType != srcType { - manifestUpdates.ManifestMIMEType = preferredType + ic.manifestUpdates.ManifestMIMEType = preferredType } else { logrus.Debugf("... will first try using the original manifest unmodified") } return preferredType, prioritizedTypes.list[1:], nil } + +// isMultiImage returns true if img is a list of images +func isMultiImage(img types.UnparsedImage) (bool, error) { + _, mt, err := img.Manifest() + if err != nil { + return false, err + } + return manifest.MIMETypeIsMultiImage(mt), nil +} diff --git a/vendor/github.com/containers/image/copy/sign.go b/vendor/github.com/containers/image/copy/sign.go index 9187d70b..91394d2b 100644 --- a/vendor/github.com/containers/image/copy/sign.go +++ b/vendor/github.com/containers/image/copy/sign.go @@ -1,17 +1,13 @@ package copy import ( - "fmt" - "io" - "github.com/containers/image/signature" "github.com/containers/image/transports" - "github.com/containers/image/types" "github.com/pkg/errors" ) -// createSignature creates a new signature of manifest at (identified by) dest using keyIdentity. -func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity string, reportWriter io.Writer) ([]byte, error) { +// createSignature creates a new signature of manifest using keyIdentity. +func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { mech, err := signature.NewGPGSigningMechanism() if err != nil { return nil, errors.Wrap(err, "Error initializing GPG") @@ -21,12 +17,12 @@ func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity s return nil, errors.Wrap(err, "Signing not supported") } - dockerReference := dest.Reference().DockerReference() + dockerReference := c.dest.Reference().DockerReference() if dockerReference == nil { - return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference())) + return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) } - fmt.Fprintf(reportWriter, "Signing manifest\n") + c.Printf("Signing manifest\n") newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) if err != nil { return nil, errors.Wrap(err, "Error creating signature") diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go index ea46a27e..47d59d9f 100644 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ b/vendor/github.com/containers/image/directory/directory_dest.go @@ -4,19 +4,77 @@ import ( "io" "io/ioutil" "os" + "path/filepath" "github.com/containers/image/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) +const version = "Directory Transport Version: 1.0\n" + +// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created +// using the 'dir' transport +var ErrNotContainerImageDir = errors.New("not a containers image 
directory, don't want to overwrite important data") + type dirImageDestination struct { - ref dirReference + ref dirReference + compress bool } -// newImageDestination returns an ImageDestination for writing to an existing directory. -func newImageDestination(ref dirReference) types.ImageDestination { - return &dirImageDestination{ref} +// newImageDestination returns an ImageDestination for writing to a directory. +func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) { + d := &dirImageDestination{ref: ref, compress: compress} + + // If directory exists check if it is empty + // if not empty, check whether the contents match that of a container image directory and overwrite the contents + // if the contents don't match throw an error + dirExists, err := pathExists(d.ref.resolvedPath) + if err != nil { + return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath) + } + if dirExists { + isEmpty, err := isDirEmpty(d.ref.resolvedPath) + if err != nil { + return nil, err + } + + if !isEmpty { + versionExists, err := pathExists(d.ref.versionPath()) + if err != nil { + return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath()) + } + if versionExists { + contents, err := ioutil.ReadFile(d.ref.versionPath()) + if err != nil { + return nil, err + } + // check if contents of version file is what we expect it to be + if string(contents) != version { + return nil, ErrNotContainerImageDir + } + } else { + return nil, ErrNotContainerImageDir + } + // delete directory contents so that only one image is in the directory at a time + if err = removeDirContents(d.ref.resolvedPath); err != nil { + return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath) + } + logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath) + } + } else { + // create directory if it doesn't exist + if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil { + return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath) + } + } + // create version file + err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0755) + if err != nil { + return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath()) + } + return d, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, @@ -42,7 +100,7 @@ func (d *dirImageDestination) SupportsSignatures() error { // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. 
func (d *dirImageDestination) ShouldCompressLayers() bool { - return false + return d.compress } // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually @@ -147,3 +205,39 @@ func (d *dirImageDestination) PutSignatures(signatures [][]byte) error { func (d *dirImageDestination) Commit() error { return nil } + +// returns true if path exists +func pathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if err != nil && os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// returns true if directory is empty +func isDirEmpty(path string) (bool, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + return false, err + } + return len(files) == 0, nil +} + +// deletes the contents of a directory +func removeDirContents(path string) error { + files, err := ioutil.ReadDir(path) + if err != nil { + return err + } + + for _, file := range files { + if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go index fddc1c52..0a8acf6b 100644 --- a/vendor/github.com/containers/image/directory/directory_src.go +++ b/vendor/github.com/containers/image/directory/directory_src.go @@ -35,7 +35,12 @@ func (s *dirImageSource) Close() error { // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *dirImageSource) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *dirImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) + } m, err := ioutil.ReadFile(s.ref.manifestPath()) if err != nil { return nil, "", err @@ -43,10 +48,6 @@ func (s *dirImageSource) GetManifest() ([]byte, string, error) { return m, manifest.GuessMIMEType(m), err } -func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) -} - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { r, err := os.Open(s.ref.layerPath(info.Digest)) @@ -60,7 +61,14 @@ func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err return r, fi.Size(), nil } -func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). 
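The new directory-destination constructor above refuses to reuse an existing, non-empty directory unless a version file proves it was written by the dir transport, and then wipes it so only one image lives in the directory at a time. A condensed sketch of that guard, built from the same ingredients as the pathExists/isDirEmpty/removeDirContents helpers (the function name and error text here are mine, not the vendored identifiers):

```go
package main

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

const version = "Directory Transport Version: 1.0\n"

var errNotImageDir = errors.New("not a containers image directory")

func ensureWritableImageDir(dir string) error {
	entries, err := ioutil.ReadDir(dir)
	switch {
	case os.IsNotExist(err):
		if err := os.MkdirAll(dir, 0755); err != nil {
			return err
		}
	case err != nil:
		return err
	case len(entries) > 0:
		contents, err := ioutil.ReadFile(filepath.Join(dir, "version"))
		if err != nil || string(contents) != version {
			return errNotImageDir // refuse to clobber data we did not write
		}
		// Only one image lives in the directory at a time, so wipe it.
		for _, e := range entries {
			if err := os.RemoveAll(filepath.Join(dir, e.Name())); err != nil {
				return err
			}
		}
	}
	// Stamp (or re-stamp) the directory so future writers recognize it.
	return ioutil.WriteFile(filepath.Join(dir, "version"), []byte(version), 0755)
}

func main() { fmt.Println(ensureWritableImageDir("/tmp/example-image-dir")) }
```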
+func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.Errorf(`Manifests lists are not supported by "dir:"`) + } signatures := [][]byte{} for i := 0; ; i++ { signature, err := ioutil.ReadFile(s.ref.signaturePath(i)) @@ -74,3 +82,8 @@ func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { } return signatures, nil } + +// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified. +func (s *dirImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/directory/directory_transport.go b/vendor/github.com/containers/image/directory/directory_transport.go index 34f74289..c3875308 100644 --- a/vendor/github.com/containers/image/directory/directory_transport.go +++ b/vendor/github.com/containers/image/directory/directory_transport.go @@ -134,27 +134,30 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string { return res } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref dirReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dirReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src := newImageSource(ref) - return image.FromSource(src) + return image.FromSource(ctx, src) } -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. +// NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. -func (ref dirReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { +func (ref dirReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { return newImageSource(ref), nil } // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ref), nil + compress := false + if ctx != nil { + compress = ctx.DirForceCompress + } + return newImageDestination(ref, compress) } // DeleteImage deletes the named image from the registry, if supported. @@ -177,3 +180,8 @@ func (ref dirReference) layerPath(digest digest.Digest) string { func (ref dirReference) signaturePath(index int) string { return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)) } + +// versionPath returns a path for the version file within a directory using our conventions. 
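GetSignatures above keeps the existing on-disk convention: signatures are numbered files, signaturePath maps index 0 to signature-1, and reading stops at the first missing index. The same loop in isolation (the helper name is mine):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// readSignatures collects signature-1, signature-2, ... until the first gap,
// mirroring the loop in dirImageSource.GetSignatures.
func readSignatures(dir string) ([][]byte, error) {
	signatures := [][]byte{}
	for i := 1; ; i++ {
		sig, err := ioutil.ReadFile(filepath.Join(dir, fmt.Sprintf("signature-%d", i)))
		if os.IsNotExist(err) {
			break
		}
		if err != nil {
			return nil, err
		}
		signatures = append(signatures, sig)
	}
	return signatures, nil
}

func main() {
	sigs, err := readSignatures("/tmp/some-image-dir") // illustrative path
	if err != nil {
		panic(err)
	}
	fmt.Println(len(sigs), "signatures found")
}
```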
+func (ref dirReference) versionPath() string { + return filepath.Join(ref.path, "version") +} diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go index aebcaa82..b2ffd965 100644 --- a/vendor/github.com/containers/image/docker/archive/src.go +++ b/vendor/github.com/containers/image/docker/archive/src.go @@ -34,3 +34,8 @@ func (s *archiveImageSource) Reference() types.ImageReference { func (s *archiveImageSource) Close() error { return nil } + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *archiveImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/docker/archive/transport.go b/vendor/github.com/containers/image/docker/archive/transport.go index 59c68c3b..047df73d 100644 --- a/vendor/github.com/containers/image/docker/archive/transport.go +++ b/vendor/github.com/containers/image/docker/archive/transport.go @@ -125,20 +125,19 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string { return []string{} } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src := newImageSource(ctx, ref) - return ctrImage.FromSource(src) + return ctrImage.FromSource(ctx, src) } -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. +// NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. -func (ref archiveReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { +func (ref archiveReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { return newImageSource(ctx, ref), nil } diff --git a/vendor/github.com/containers/image/docker/daemon/client.go b/vendor/github.com/containers/image/docker/daemon/client.go new file mode 100644 index 00000000..82fab4b1 --- /dev/null +++ b/vendor/github.com/containers/image/docker/daemon/client.go @@ -0,0 +1,69 @@ +package daemon + +import ( + "net/http" + "path/filepath" + + "github.com/containers/image/types" + dockerclient "github.com/docker/docker/client" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + // The default API version to be used in case none is explicitly specified + defaultAPIVersion = "1.22" +) + +// NewDockerClient initializes a new API client based on the passed SystemContext. 
+func newDockerClient(ctx *types.SystemContext) (*dockerclient.Client, error) { + host := dockerclient.DefaultDockerHost + if ctx != nil && ctx.DockerDaemonHost != "" { + host = ctx.DockerDaemonHost + } + + // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient. + // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s + // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket + // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. + // + // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. + proto, _, _, err := dockerclient.ParseHost(host) + if err != nil { + return nil, err + } + var httpClient *http.Client + if proto != "unix" { + hc, err := tlsConfig(ctx) + if err != nil { + return nil, err + } + httpClient = hc + } + + return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) +} + +func tlsConfig(ctx *types.SystemContext) (*http.Client, error) { + options := tlsconfig.Options{} + if ctx != nil && ctx.DockerDaemonInsecureSkipTLSVerify { + options.InsecureSkipVerify = true + } + + if ctx != nil && ctx.DockerDaemonCertPath != "" { + options.CAFile = filepath.Join(ctx.DockerDaemonCertPath, "ca.pem") + options.CertFile = filepath.Join(ctx.DockerDaemonCertPath, "cert.pem") + options.KeyFile = filepath.Join(ctx.DockerDaemonCertPath, "key.pem") + } + + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + CheckRedirect: dockerclient.CheckRedirect, + }, nil +} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go index 559e5c71..f73ac233 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go @@ -14,6 +14,7 @@ import ( type daemonImageDestination struct { ref daemonReference + mustMatchRuntimeOS bool *tarfile.Destination // Implements most of types.ImageDestination // For talking to imageLoadGoroutine goroutineCancel context.CancelFunc @@ -24,7 +25,7 @@ type daemonImageDestination struct { } // newImageDestination returns a types.ImageDestination for the specified image reference. 
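newDockerClient above makes the docker-daemon transport configurable through SystemContext instead of hard-coding client.DefaultDockerHost. A usage sketch assuming a remote engine behind TLS (the host and cert path are invented values; the single-argument NewImageSource matches the signature introduced in this diff):

```go
package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
	"github.com/containers/image/types"
)

func main() {
	ctx := &types.SystemContext{
		DockerDaemonHost:     "tcp://build-host.example.com:2376",
		DockerDaemonCertPath: "/etc/docker/certs", // expects ca.pem, cert.pem, key.pem
	}
	ref, err := alltransports.ParseImageName("docker-daemon:busybox:latest")
	if err != nil {
		panic(err)
	}
	src, err := ref.NewImageSource(ctx)
	if err != nil {
		panic(err)
	}
	defer src.Close()
	fmt.Println("connected via", ctx.DockerDaemonHost)
}
```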
-func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { +func newImageDestination(ctx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { if ref.ref == nil { return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) } @@ -33,7 +34,12 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) } - c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host + var mustMatchRuntimeOS = true + if ctx != nil && ctx.DockerDaemonHost != client.DefaultDockerHost { + mustMatchRuntimeOS = false + } + + c, err := newDockerClient(ctx) if err != nil { return nil, errors.Wrap(err, "Error initializing docker engine client") } @@ -42,16 +48,17 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. statusChannel := make(chan error, 1) - ctx, goroutineCancel := context.WithCancel(context.Background()) - go imageLoadGoroutine(ctx, c, reader, statusChannel) + goroutineContext, goroutineCancel := context.WithCancel(context.Background()) + go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) return &daemonImageDestination{ - ref: ref, - Destination: tarfile.NewDestination(writer, namedTaggedRef), - goroutineCancel: goroutineCancel, - statusChannel: statusChannel, - writer: writer, - committed: false, + ref: ref, + mustMatchRuntimeOS: mustMatchRuntimeOS, + Destination: tarfile.NewDestination(writer, namedTaggedRef), + goroutineCancel: goroutineCancel, + statusChannel: statusChannel, + writer: writer, + committed: false, }, nil } @@ -80,7 +87,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. func (d *daemonImageDestination) MustMatchRuntimeOS() bool { - return true + return d.mustMatchRuntimeOS } // Close removes resources associated with an initialized ImageDestination, if any. diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go index 644dbeec..5cf7679b 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_src.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_src.go @@ -6,14 +6,12 @@ import ( "os" "github.com/containers/image/docker/tarfile" + "github.com/containers/image/internal/tmpdir" "github.com/containers/image/types" - "github.com/docker/docker/client" "github.com/pkg/errors" "golang.org/x/net/context" ) -const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. - type daemonImageSource struct { ref daemonReference *tarfile.Source // Implements most of types.ImageSource @@ -35,7 +33,7 @@ type layerInfo struct { // is the config, and that the following len(RootFS) files are the layers, but that feels // way too brittle.) 
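Note the buffered statusChannel in the hunk above: Commit() may never read it, so a capacity of 1 lets imageLoadGoroutine deposit its result and terminate instead of leaking. The pattern reduced to its core:

```go
package main

import "fmt"

func main() {
	// Capacity 1: the worker can deposit its status and exit even if the
	// consumer (Commit) never arrives, so the goroutine is not leaked.
	status := make(chan error, 1)
	go func() {
		// ... stream the tarball into the daemon here ...
		status <- nil // never blocks, thanks to the buffer
	}()
	fmt.Println("load result:", <-status) // what Commit() effectively does
}
```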
func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) { - c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host + c, err := newDockerClient(ctx) if err != nil { return nil, errors.Wrap(err, "Error initializing docker engine client") } @@ -48,7 +46,7 @@ func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageS defer inputStream.Close() // FIXME: use SystemContext here. - tarCopyFile, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-tar") + tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-daemon-tar") if err != nil { return nil, err } @@ -83,3 +81,8 @@ func (s *daemonImageSource) Reference() types.ImageReference { func (s *daemonImageSource) Close() error { return os.Remove(s.tarCopyPath) } + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *daemonImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go index 41ccd1f1..8ad6b521 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go @@ -151,21 +151,22 @@ func (ref daemonReference) PolicyConfigurationNamespaces() []string { return []string{} } -// NewImage returns a types.Image for this reference. -// The caller must call .Close() on the returned Image. -func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(ctx, ref) if err != nil { return nil, err } - return image.FromSource(src) + return image.FromSource(ctx, src) } -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. +// NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. 
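The hard-coded temporaryDirectoryForBigFiles constant is gone in favor of internal/tmpdir, whose implementation this diff does not show. A guess at what such a helper does: the /var/tmp default and the tmpfs rationale come from the deleted comment, while the TMPDIR override is my own embellishment and may not match the real package.

```go
package main

import (
	"fmt"
	"os"
)

// temporaryDirectoryForBigFiles prefers /var/tmp over os.TempDir(), because
// with systemd /tmp is often a size-limited tmpfs unsuitable for image-sized
// scratch files.
func temporaryDirectoryForBigFiles() string {
	if d := os.Getenv("TMPDIR"); d != "" { // assumed override, not from the diff
		return d
	}
	return "/var/tmp"
}

func main() {
	fmt.Println("big files go to:", temporaryDirectoryForBigFiles())
}
```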
-func (ref daemonReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { +func (ref daemonReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { return newImageSource(ctx, ref) } diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go index 4c3d8b9f..217e9dcb 100644 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ b/vendor/github.com/containers/image/docker/docker_client.go @@ -3,24 +3,20 @@ package docker import ( "context" "crypto/tls" - "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" - "net" "net/http" - "os" "path/filepath" "strings" "time" "github.com/containers/image/docker/reference" + "github.com/containers/image/pkg/docker/config" + "github.com/containers/image/pkg/tlsclientconfig" "github.com/containers/image/types" - "github.com/containers/storage/pkg/homedir" "github.com/docker/distribution/registry/client" - helperclient "github.com/docker/docker-credential-helpers/client" - "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -28,13 +24,8 @@ import ( ) const ( - dockerHostname = "docker.io" - dockerRegistry = "registry-1.docker.io" - dockerAuthRegistry = "https://index.docker.io/v1/" - - dockerCfg = ".docker" - dockerCfgFileName = "config.json" - dockerCfgObsolete = ".dockercfg" + dockerHostname = "docker.io" + dockerRegistry = "registry-1.docker.io" systemPerHostCertDirPath = "/etc/docker/certs.d" @@ -52,9 +43,13 @@ const ( extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type ) -// ErrV1NotSupported is returned when we're trying to talk to a -// docker V1 registry. -var ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") +var ( + // ErrV1NotSupported is returned when we're trying to talk to a + // docker V1 registry. + ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") + // ErrUnauthorizedForCredentials is returned when the status code returned is 401 + ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") +) // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: // signature represents a Docker image signature. @@ -113,27 +108,7 @@ func serverDefault() *tls.Config { } } -func newTransport() *http.Transport { - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - tr.Dial = proxyDialer.Dial - } - return tr -} - -// dockerCertDir returns a path to a directory to be consumed by setupCertificates() depending on ctx and hostPort. +// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. 
func dockerCertDir(ctx *types.SystemContext, hostPort string) string { if ctx != nil && ctx.DockerCertPath != "" { return ctx.DockerCertPath @@ -149,115 +124,84 @@ func dockerCertDir(ctx *types.SystemContext, hostPort string) string { return filepath.Join(hostCertDir, hostPort) } -func setupCertificates(dir string, tlsc *tls.Config) error { - logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - if os.IsPermission(err) { - logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) - return nil - } - return err - } - - for _, f := range fs { - fullPath := filepath.Join(dir, f.Name()) - if strings.HasSuffix(f.Name(), ".crt") { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return errors.Wrap(err, "unable to get system cert pool") - } - tlsc.RootCAs = systemPool - logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) - if err != nil { - return err - } - tlsc.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf(" cert: %s", fullPath) - if !hasFile(fs, keyName) { - return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) - if err != nil { - return err - } - tlsc.Certificates = append(tlsc.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf(" key: %s", fullPath) - if !hasFile(fs, certName) { - return errors.Errorf("missing client certificate %s for key %s", certName, keyName) - } - } - } - return nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// newDockerClient returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry) +// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) -func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { +func newDockerClientFromRef(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { registry := reference.Domain(ref.ref) - if registry == dockerHostname { - registry = dockerRegistry + username, password, err := config.GetAuthentication(ctx, reference.Domain(ref.ref)) + if err != nil { + return nil, errors.Wrapf(err, "error getting username and password") } - username, password, err := getAuth(ctx, reference.Domain(ref.ref)) + sigBase, err := configuredSignatureStorageBase(ctx, ref, write) if err != nil { return nil, err } - tr := newTransport() + remoteName := reference.Path(ref.ref) + + return newDockerClientWithDetails(ctx, registry, username, password, actions, sigBase, remoteName) +} + +// newDockerClientWithDetails returns a new dockerClient instance for the given parameters +func newDockerClientWithDetails(ctx *types.SystemContext, registry, username, password, 
actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) { + hostName := registry + if registry == dockerHostname { + registry = dockerRegistry + } + tr := tlsclientconfig.NewTransport() tr.TLSClientConfig = serverDefault() + // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is // undocumented and may change if docker/docker changes. - certDir := dockerCertDir(ctx, reference.Domain(ref.ref)) - if err := setupCertificates(certDir, tr.TLSClientConfig); err != nil { + certDir := dockerCertDir(ctx, hostName) + if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil { return nil, err } + if ctx != nil && ctx.DockerInsecureSkipTLSVerify { tr.TLSClientConfig.InsecureSkipVerify = true } - client := &http.Client{Transport: tr} - - sigBase, err := configuredSignatureStorageBase(ctx, ref, write) - if err != nil { - return nil, err - } return &dockerClient{ ctx: ctx, registry: registry, username: username, password: password, - client: client, + client: &http.Client{Transport: tr}, signatureBase: sigBase, scope: authScope{ actions: actions, - remoteName: reference.Path(ref.ref), + remoteName: remoteName, }, }, nil } +// CheckAuth validates the credentials by attempting to log into the registry +// returns an error if an error occcured while making the http request or the status code received was 401 +func CheckAuth(ctx context.Context, sCtx *types.SystemContext, username, password, registry string) error { + newLoginClient, err := newDockerClientWithDetails(sCtx, registry, username, password, "", nil, "") + if err != nil { + return errors.Wrapf(err, "error creating new docker client") + } + + resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + return nil + case http.StatusUnauthorized: + return ErrUnauthorizedForCredentials + default: + return errors.Errorf("error occured with status code %q", resp.StatusCode) + } +} + // makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/. 
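Together with the pkg/docker/config import earlier in this file, credential handling is now reusable outside the transport. A usage sketch that resolves stored credentials and then validates them with the new CheckAuth; the registry name is a placeholder, but both signatures are taken from this diff:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/docker"
	"github.com/containers/image/pkg/docker/config"
)

func main() {
	registry := "registry.example.com" // illustrative
	// Credential lookup now lives in pkg/docker/config (config.json,
	// .dockercfg and credential helpers); a nil SystemContext uses defaults.
	username, password, err := config.GetAuthentication(nil, registry)
	if err != nil {
		panic(err)
	}
	// CheckAuth probes GET /v2/ and maps a 401 to ErrUnauthorizedForCredentials.
	err = docker.CheckAuth(context.Background(), nil, username, password, registry)
	switch err {
	case nil:
		fmt.Println("login OK")
	case docker.ErrUnauthorizedForCredentials:
		fmt.Println("bad username/password")
	default:
		fmt.Println("could not reach registry:", err)
	}
}
```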
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) { @@ -329,7 +273,10 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error { return errors.Errorf("missing realm in bearer auth challenge") } service, _ := challenge.Parameters["service"] // Will be "" if not present - scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions) + var scope string + if c.scope.remoteName != "" && c.scope.actions != "" { + scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions) + } token, err := c.getBearerToken(req.Context(), realm, service, scope) if err != nil { return err @@ -364,7 +311,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope if c.username != "" && c.password != "" { authReq.SetBasicAuth(c.username, c.password) } - tr := newTransport() + tr := tlsclientconfig.NewTransport() // TODO(runcom): insecure for now to contact the external token service tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} client := &http.Client{Transport: tr} @@ -375,7 +322,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope defer res.Body.Close() switch res.StatusCode { case http.StatusUnauthorized: - return nil, errors.Errorf("unable to retrieve auth token: 401 unauthorized") + return nil, ErrUnauthorizedForCredentials case http.StatusOK: break default: @@ -399,65 +346,6 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope return &token, nil } -func getAuth(ctx *types.SystemContext, registry string) (string, string, error) { - if ctx != nil && ctx.DockerAuthConfig != nil { - return ctx.DockerAuthConfig.Username, ctx.DockerAuthConfig.Password, nil - } - var dockerAuth dockerConfigFile - dockerCfgPath := filepath.Join(getDefaultConfigDir(".docker"), dockerCfgFileName) - if _, err := os.Stat(dockerCfgPath); err == nil { - j, err := ioutil.ReadFile(dockerCfgPath) - if err != nil { - return "", "", err - } - if err := json.Unmarshal(j, &dockerAuth); err != nil { - return "", "", err - } - - } else if os.IsNotExist(err) { - // try old config path - oldDockerCfgPath := filepath.Join(getDefaultConfigDir(dockerCfgObsolete)) - if _, err := os.Stat(oldDockerCfgPath); err != nil { - if os.IsNotExist(err) { - return "", "", nil - } - return "", "", errors.Wrap(err, oldDockerCfgPath) - } - - j, err := ioutil.ReadFile(oldDockerCfgPath) - if err != nil { - return "", "", err - } - if err := json.Unmarshal(j, &dockerAuth.AuthConfigs); err != nil { - return "", "", err - } - - } else if err != nil { - return "", "", errors.Wrap(err, dockerCfgPath) - } - - // First try cred helpers. They should always be normalized. - if ch, exists := dockerAuth.CredHelpers[registry]; exists { - return getAuthFromCredHelper(ch, registry) - } - - // I'm feeling lucky. - if c, exists := dockerAuth.AuthConfigs[registry]; exists { - return decodeDockerAuth(c.Auth) - } - - // bad luck; let's normalize the entries first - registry = normalizeRegistry(registry) - normalizedAuths := map[string]dockerAuthConfig{} - for k, v := range dockerAuth.AuthConfigs { - normalizedAuths[normalizeRegistry(k)] = v - } - if c, exists := normalizedAuths[registry]; exists { - return decodeDockerAuth(c.Auth) - } - return "", "", nil -} - // detectProperties detects various properties of the registry. // See the dockerClient documentation for members which are affected by this. 
func (c *dockerClient) detectProperties(ctx context.Context) error { @@ -540,67 +428,3 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe } return &parsedBody, nil } - -func getDefaultConfigDir(confPath string) string { - return filepath.Join(homedir.Get(), confPath) -} - -type dockerAuthConfig struct { - Auth string `json:"auth,omitempty"` -} - -type dockerConfigFile struct { - AuthConfigs map[string]dockerAuthConfig `json:"auths"` - CredHelpers map[string]string `json:"credHelpers,omitempty"` -} - -func getAuthFromCredHelper(credHelper, registry string) (string, string, error) { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - creds, err := helperclient.Get(p, registry) - if err != nil { - return "", "", err - } - - return creds.Username, creds.Secret, nil -} - -func decodeDockerAuth(s string) (string, string, error) { - decoded, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(decoded), ":", 2) - if len(parts) != 2 { - // if it's invalid just skip, as docker does - return "", "", nil - } - user := parts[0] - password := strings.Trim(parts[1], "\x00") - return user, password, nil -} - -// convertToHostname converts a registry url which has http|https prepended -// to just an hostname. -// Copied from github.com/docker/docker/registry/auth.go -func convertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} - -func normalizeRegistry(registry string) string { - normalized := convertToHostname(registry) - switch normalized { - case "registry-1.docker.io", "docker.io": - return "index.docker.io" - } - return normalized -} diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go index 992d9203..2148ed8b 100644 --- a/vendor/github.com/containers/image/docker/docker_image.go +++ b/vendor/github.com/containers/image/docker/docker_image.go @@ -12,26 +12,26 @@ import ( "github.com/pkg/errors" ) -// Image is a Docker-specific implementation of types.Image with a few extra methods +// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods // which are specific to Docker. type Image struct { - types.Image + types.ImageCloser src *dockerImageSource } // newImage returns a new Image interface type after setting up // a client to the registry hosting the given image. // The caller must call .Close() on the returned Image. -func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error) { - s, err := newImageSource(ctx, ref, nil) +func newImage(ctx *types.SystemContext, ref dockerReference) (types.ImageCloser, error) { + s, err := newImageSource(ctx, ref) if err != nil { return nil, err } - img, err := image.FromSource(s) + img, err := image.FromSource(ctx, s) if err != nil { return nil, err } - return &Image{Image: img, src: s}, nil + return &Image{ImageCloser: img, src: s}, nil } // SourceRefFullName returns a fully expanded name for the repository this image is in. 
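With NewImage now returning types.ImageCloser, callers still own the Close() call, as the updated doc comments stress. A usage sketch against the revised API (the reference string is illustrative, and the nil SystemContext is for brevity only):

```go
package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	ref, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	img, err := ref.NewImage(nil) // returns a types.ImageCloser per this diff
	if err != nil {
		panic(err)
	}
	defer img.Close() // the caller's obligation, as the comments note

	_, mimeType, err := img.Manifest()
	if err != nil {
		panic(err)
	}
	fmt.Println("manifest MIME type:", mimeType)
}
```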
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go index 68404bda..79c38622 100644 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/docker/docker_image_dest.go @@ -20,25 +20,11 @@ import ( "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client" "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -var manifestMIMETypes = []string{ - // TODO(runcom): we'll add OCI as part of another PR here - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, -} - -func supportedManifestMIMETypesMap() map[string]bool { - m := make(map[string]bool, len(manifestMIMETypes)) - for _, mt := range manifestMIMETypes { - m[mt] = true - } - return m -} - type dockerImageDestination struct { ref dockerReference c *dockerClient @@ -48,7 +34,7 @@ type dockerImageDestination struct { // newImageDestination creates a new ImageDestination for the specified image reference. func newImageDestination(ctx *types.SystemContext, ref dockerReference) (types.ImageDestination, error) { - c, err := newDockerClient(ctx, ref, true, "pull,push") + c, err := newDockerClientFromRef(ctx, ref, true, "pull,push") if err != nil { return nil, err } @@ -70,7 +56,12 @@ func (d *dockerImageDestination) Close() error { } func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { - return manifestMIMETypes + return []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, + } } // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. @@ -245,7 +236,7 @@ func (d *dockerImageDestination) PutManifest(m []byte) error { return err } defer res.Body.Close() - if res.StatusCode != http.StatusCreated { + if !successStatus(res.StatusCode) { err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest to %s", path) if isManifestInvalidError(errors.Cause(err)) { err = types.ManifestTypeRejectedError{Err: err} @@ -255,6 +246,12 @@ func (d *dockerImageDestination) PutManifest(m []byte) error { return nil } +// successStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive). +func successStatus(status int) bool { + return status >= 200 && status <= 399 +} + // isManifestInvalidError returns true iff err from client.HandleErrorReponse is a “manifest invalid” error. 
func isManifestInvalidError(err error) bool { errors, ok := err.(errcode.Errors) diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go index 88607210..63bfe8aa 100644 --- a/vendor/github.com/containers/image/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/docker/docker_image_src.go @@ -21,41 +21,23 @@ import ( ) type dockerImageSource struct { - ref dockerReference - requestedManifestMIMETypes []string - c *dockerClient + ref dockerReference + c *dockerClient // State cachedManifest []byte // nil if not loaded yet cachedManifestMIMEType string // Only valid if cachedManifest != nil } -// newImageSource creates a new ImageSource for the specified image reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. +// newImageSource creates a new ImageSource for the specified image reference. // The caller must call .Close() on the returned ImageSource. -func newImageSource(ctx *types.SystemContext, ref dockerReference, requestedManifestMIMETypes []string) (*dockerImageSource, error) { - c, err := newDockerClient(ctx, ref, false, "pull") +func newImageSource(ctx *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { + c, err := newDockerClientFromRef(ctx, ref, false, "pull") if err != nil { return nil, err } - if requestedManifestMIMETypes == nil { - requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes - } - supportedMIMEs := supportedManifestMIMETypesMap() - acceptableRequestedMIMEs := false - for _, mtrequested := range requestedManifestMIMETypes { - if supportedMIMEs[mtrequested] { - acceptableRequestedMIMEs = true - break - } - } - if !acceptableRequestedMIMEs { - requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes - } return &dockerImageSource{ ref: ref, - requestedManifestMIMETypes: requestedManifestMIMETypes, - c: c, + c: c, }, nil } @@ -70,6 +52,11 @@ func (s *dockerImageSource) Close() error { return nil } +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *dockerImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} + // simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1) // Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string. func simplifyContentType(contentType string) string { @@ -85,7 +72,12 @@ func simplifyContentType(contentType string) string { // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *dockerImageSource) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
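The source-side hunks below replace GetTargetManifest with an optional instanceDigest parameter on GetManifest and GetSignatures: nil means the primary manifest, and a digest drawn from a manifest list selects one instance. A sketch of the resulting two-step lookup (the helper name is mine; MIMETypeIsMultiImage is the manifest-package predicate used elsewhere in this diff):

```go
package sketch

import (
	"fmt"

	"github.com/containers/image/manifest"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
)

// platformManifest fetches one instance out of a manifest list:
// GetManifest(nil) yields the primary manifest, and a digest taken from that
// list is passed back in to retrieve the per-platform manifest.
func platformManifest(src types.ImageSource, instance digest.Digest) ([]byte, error) {
	_, mt, err := src.GetManifest(nil)
	if err != nil {
		return nil, err
	}
	if !manifest.MIMETypeIsMultiImage(mt) {
		return nil, fmt.Errorf("%s is not a manifest list", mt)
	}
	blob, _, err := src.GetManifest(&instance)
	return blob, err
}
```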
+func (s *dockerImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return s.fetchManifest(context.TODO(), instanceDigest.String()) + } err := s.ensureManifestIsLoaded(context.TODO()) if err != nil { return nil, "", err @@ -96,7 +88,7 @@ func (s *dockerImageSource) GetManifest() ([]byte, string, error) { func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest) headers := make(map[string][]string) - headers["Accept"] = s.requestedManifestMIMETypes + headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes res, err := s.c.makeRequest(ctx, "GET", path, headers, nil) if err != nil { return nil, "", err @@ -112,18 +104,12 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil } -// GetTargetManifest returns an image's manifest given a digest. -// This is mainly used to retrieve a single image's manifest out of a manifest list. -func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return s.fetchManifest(context.TODO(), digest.String()) -} - // ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType // // ImageSource implementations are not required or expected to do any caching, // but because our signatures are “attached” to the manifest digest, -// we need to ensure that the digest of the manifest returned by GetManifest -// and used by GetSignatures are consistent, otherwise we would get spurious +// we need to ensure that the digest of the manifest returned by GetManifest(nil) +// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious // signature verification failures when pulling while a tag is being updated. func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { if s.cachedManifest != nil { @@ -158,6 +144,7 @@ func (s *dockerImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64 logrus.Debug(err) continue } + break } } if resp.Body != nil && err == nil { @@ -193,22 +180,30 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, return res.Body, getBlobSize(res), nil } -func (s *dockerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { if err := s.c.detectProperties(ctx); err != nil { return nil, err } switch { case s.c.signatureBase != nil: - return s.getSignaturesFromLookaside(ctx) + return s.getSignaturesFromLookaside(ctx, instanceDigest) case s.c.supportsSignatures: - return s.getSignaturesFromAPIExtension(ctx) + return s.getSignaturesFromAPIExtension(ctx, instanceDigest) default: return [][]byte{}, nil } } -// manifestDigest returns a digest of the manifest, either from the supplied reference or from a fetched manifest. 
-func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, error) { +// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, +// or finally, from a fetched manifest. +func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { + if instanceDigest != nil { + return *instanceDigest, nil + } if digested, ok := s.ref.ref.(reference.Digested); ok { d := digested.Digest() if d.Algorithm() == digest.Canonical { @@ -223,8 +218,8 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, // getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, // which is not nil. -func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx) +func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { return nil, err } @@ -293,8 +288,8 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( } // getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. -func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx) +func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { return nil, err } @@ -315,7 +310,7 @@ func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context) ( // deleteImage deletes the named image from the registry, if supported. func deleteImage(ctx *types.SystemContext, ref dockerReference) error { - c, err := newDockerClient(ctx, ref, true, "push") + c, err := newDockerClientFromRef(ctx, ref, true, "push") if err != nil { return err } diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/docker/docker_transport.go index 15d68e99..cc0aa298 100644 --- a/vendor/github.com/containers/image/docker/docker_transport.go +++ b/vendor/github.com/containers/image/docker/docker_transport.go @@ -122,20 +122,19 @@ func (ref dockerReference) PolicyConfigurationNamespaces() []string { return policyconfiguration.DockerReferenceNamespaces(ref.ref) } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
+func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { return newImage(ctx, ref) } -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. +// NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. -func (ref dockerReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return newImageSource(ctx, ref, requestedManifestMIMETypes) +func (ref dockerReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, ref) } // NewImageDestination returns a types.ImageDestination for this reference. diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go index 72c85c70..eb11ca86 100644 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/docker/tarfile/dest.go @@ -11,6 +11,7 @@ import ( "time" "github.com/containers/image/docker/reference" + "github.com/containers/image/internal/tmpdir" "github.com/containers/image/manifest" "github.com/containers/image/types" "github.com/opencontainers/go-digest" @@ -18,8 +19,6 @@ import ( "github.com/sirupsen/logrus" ) -const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. - // Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. type Destination struct { writer io.Writer @@ -107,7 +106,7 @@ func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size. logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") - streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-tarfile-blob") + streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob") if err != nil { return types.BlobInfo{}, err } @@ -168,7 +167,7 @@ func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { func (d *Destination) PutManifest(m []byte) error { // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, // so the caller trying a different manifest kind would be pointless. 
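The PutBlob hunk above handles inputInfo.Size == -1 by spooling the stream to a temporary file while digesting it, so both size and digest are known before the tar entry is written. The same dance in miniature (/var/tmp stands in for tmpdir.TemporaryDirectoryForBigFiles()):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	stream := strings.NewReader("blob of unknown size")

	tmp, err := ioutil.TempFile("/var/tmp", "tarfile-blob")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()

	// Copy to disk and digest in one pass; the file can then be re-read
	// with a known size when writing the tar entry.
	digester := digest.Canonical.Digester()
	size, err := io.Copy(tmp, io.TeeReader(stream, digester.Hash()))
	if err != nil {
		panic(err)
	}
	fmt.Printf("size=%d digest=%s\n", size, digester.Digest())
}
```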
- var man schema2Manifest + var man manifest.Schema2 if err := json.Unmarshal(m, &man); err != nil { return errors.Wrap(err, "Error parsing manifest") } @@ -177,12 +176,12 @@ func (d *Destination) PutManifest(m []byte) error { } layerPaths := []string{} - for _, l := range man.Layers { + for _, l := range man.LayersDescriptors { layerPaths = append(layerPaths, l.Digest.String()) } items := []ManifestItem{{ - Config: man.Config.Digest.String(), + Config: man.ConfigDescriptor.Digest.String(), RepoTags: []string{d.repoTag}, Layers: layerPaths, Parent: "", diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go index f77cb713..a18e2105 100644 --- a/vendor/github.com/containers/image/docker/tarfile/src.go +++ b/vendor/github.com/containers/image/docker/tarfile/src.go @@ -24,8 +24,8 @@ type Source struct { tarManifest *ManifestItem // nil if not available yet. configBytes []byte configDigest digest.Digest - orderedDiffIDList []diffID - knownLayers map[diffID]*layerInfo + orderedDiffIDList []digest.Digest + knownLayers map[digest.Digest]*layerInfo // Other state generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. } @@ -156,7 +156,7 @@ func (s *Source) ensureCachedDataIsPresent() error { if err != nil { return err } - var parsedConfig image // Most fields ommitted, we only care about layer DiffIDs. + var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) } @@ -194,12 +194,12 @@ func (s *Source) LoadTarManifest() ([]ManifestItem, error) { return s.loadTarManifest() } -func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) { +func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { // Collect layer data available in manifest and config. if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) } - knownLayers := map[diffID]*layerInfo{} + knownLayers := map[digest.Digest]*layerInfo{} unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. for i, diffID := range parsedConfig.RootFS.DiffIDs { if _, ok := knownLayers[diffID]; ok { @@ -249,28 +249,34 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *Source) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *Source) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(nil) has returned a manifest.DockerV2Schema2MediaType. 
+ return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) + } if s.generatedManifest == nil { if err := s.ensureCachedDataIsPresent(); err != nil { return nil, "", err } - m := schema2Manifest{ + m := manifest.Schema2{ SchemaVersion: 2, MediaType: manifest.DockerV2Schema2MediaType, - Config: distributionDescriptor{ + ConfigDescriptor: manifest.Schema2Descriptor{ MediaType: manifest.DockerV2Schema2ConfigMediaType, Size: int64(len(s.configBytes)), Digest: s.configDigest, }, - Layers: []distributionDescriptor{}, + LayersDescriptors: []manifest.Schema2Descriptor{}, } for _, diffID := range s.orderedDiffIDList { li, ok := s.knownLayers[diffID] if !ok { return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) } - m.Layers = append(m.Layers, distributionDescriptor{ - Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball + m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ + Digest: diffID, // diffID is a digest of the uncompressed tarball MediaType: manifest.DockerV2Schema2LayerMediaType, Size: li.size, }) @@ -284,13 +290,6 @@ func (s *Source) GetManifest() ([]byte, string, error) { return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil } -// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest -// out of a manifest list. -func (s *Source) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - // How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType. - return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) -} - type readCloseWrapper struct { io.Reader closeFunc func() error @@ -313,7 +312,7 @@ func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil } - if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball, + if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, stream, err := s.openTarComponent(li.path) if err != nil { return nil, 0, err @@ -355,6 +354,13 @@ func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. -func (s *Source) GetSignatures(ctx context.Context) ([][]byte, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(nil) has returned a manifest.DockerV2Schema2MediaType. 
+ return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) + } return [][]byte{}, nil } diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go index f16cc8c6..2aa56754 100644 --- a/vendor/github.com/containers/image/docker/tarfile/types.go +++ b/vendor/github.com/containers/image/docker/tarfile/types.go @@ -1,6 +1,9 @@ package tarfile -import "github.com/opencontainers/go-digest" +import ( + "github.com/containers/image/manifest" + "github.com/opencontainers/go-digest" +) // Various data structures. @@ -18,37 +21,8 @@ type ManifestItem struct { Config string RepoTags []string Layers []string - Parent imageID `json:",omitempty"` - LayerSources map[diffID]distributionDescriptor `json:",omitempty"` + Parent imageID `json:",omitempty"` + LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"` } type imageID string -type diffID digest.Digest - -// Based on github.com/docker/distribution/blobs.go -type distributionDescriptor struct { - MediaType string `json:"mediaType,omitempty"` - Size int64 `json:"size,omitempty"` - Digest digest.Digest `json:"digest,omitempty"` - URLs []string `json:"urls,omitempty"` -} - -// Based on github.com/docker/distribution/manifest/schema2/manifest.go -// FIXME: We are repeating this all over the place; make a public copy? -type schema2Manifest struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType,omitempty"` - Config distributionDescriptor `json:"config"` - Layers []distributionDescriptor `json:"layers"` -} - -// Based on github.com/docker/docker/image/image.go -// MOST CONTENT OMITTED AS UNNECESSARY -type image struct { - RootFS *rootFS `json:"rootfs,omitempty"` -} - -type rootFS struct { - Type string `json:"type"` - DiffIDs []diffID `json:"diff_ids,omitempty"` -} diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go index c79adacc..412261dd 100644 --- a/vendor/github.com/containers/image/image/docker_list.go +++ b/vendor/github.com/containers/image/image/docker_list.go @@ -2,6 +2,7 @@ package image import ( "encoding/json" + "fmt" "runtime" "github.com/containers/image/manifest" @@ -21,7 +22,7 @@ type platformSpec struct { // A manifestDescriptor references a platform-specific manifest. type manifestDescriptor struct { - descriptor + manifest.Schema2Descriptor Platform platformSpec `json:"platform"` } @@ -31,22 +32,36 @@ type manifestList struct { Manifests []manifestDescriptor `json:"manifests"` } -func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (genericManifest, error) { - list := manifestList{} - if err := json.Unmarshal(manblob, &list); err != nil { - return nil, err +// chooseDigestFromManifestList parses blob as a schema2 manifest list, +// and returns the digest of the image appropriate for the current environment. 
+func chooseDigestFromManifestList(ctx *types.SystemContext, blob []byte) (digest.Digest, error) { + wantedArch := runtime.GOARCH + if ctx != nil && ctx.ArchitectureChoice != "" { + wantedArch = ctx.ArchitectureChoice + } + wantedOS := runtime.GOOS + if ctx != nil && ctx.OSChoice != "" { + wantedOS = ctx.OSChoice + } + + list := manifestList{} + if err := json.Unmarshal(blob, &list); err != nil { + return "", err } - var targetManifestDigest digest.Digest for _, d := range list.Manifests { - if d.Platform.Architecture == runtime.GOARCH && d.Platform.OS == runtime.GOOS { - targetManifestDigest = d.Digest - break + if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { + return d.Digest, nil } } - if targetManifestDigest == "" { - return nil, errors.New("no supported platform found in manifest list") + return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) +} + +func manifestSchema2FromManifestList(ctx *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + targetManifestDigest, err := chooseDigestFromManifestList(ctx, manblob) + if err != nil { + return nil, err } - manblob, mt, err := src.GetTargetManifest(targetManifestDigest) + manblob, mt, err := src.GetManifest(&targetManifestDigest) if err != nil { return nil, err } @@ -59,5 +74,20 @@ func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (gen return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) } - return manifestInstanceFromBlob(src, manblob, mt) + return manifestInstanceFromBlob(ctx, src, manblob, mt) +} + +// ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate +// for the current system from the manifest available from src. +func ChooseManifestInstanceFromManifestList(ctx *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) { + // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later, + // probably along with manifest list editing. + blob, mt, err := src.Manifest() + if err != nil { + return "", err + } + if mt != manifest.DockerV2ListMediaType { + return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt) + } + return chooseDigestFromManifestList(ctx, blob) } diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go index 4152b3cd..c6a6989d 100644 --- a/vendor/github.com/containers/image/image/docker_schema1.go +++ b/vendor/github.com/containers/image/image/docker_schema1.go @@ -2,9 +2,6 @@ package image import ( "encoding/json" - "regexp" - "strings" - "time" "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" @@ -14,87 +11,25 @@ import ( "github.com/pkg/errors" ) -var ( - validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) -) - -type fsLayersSchema1 struct { - BlobSum digest.Digest `json:"blobSum"` -} - -type historySchema1 struct { - V1Compatibility string `json:"v1Compatibility"` -} - -// historySchema1 is a string containing this. It is similar to v1Image but not the same, in particular note the ThrowAway field. 
-type v1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig struct { - Cmd []string - } `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` -} - type manifestSchema1 struct { - Name string `json:"name"` - Tag string `json:"tag"` - Architecture string `json:"architecture"` - FSLayers []fsLayersSchema1 `json:"fsLayers"` - History []historySchema1 `json:"history"` - SchemaVersion int `json:"schemaVersion"` + m *manifest.Schema1 } -func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) { - mschema1 := &manifestSchema1{} - if err := json.Unmarshal(manifest, mschema1); err != nil { - return nil, err - } - if mschema1.SchemaVersion != 1 { - return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion) - } - if len(mschema1.FSLayers) != len(mschema1.History) { - return nil, errors.New("length of history not equal to number of layers") - } - if len(mschema1.FSLayers) == 0 { - return nil, errors.New("no FSLayers in manifest") - } - - if err := fixManifestLayers(mschema1); err != nil { - return nil, err - } - return mschema1, nil -} - -// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. -func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest { - var name, tag string - if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. - name = reference.Path(ref) - if tagged, ok := ref.(reference.NamedTagged); ok { - tag = tagged.Tag() - } - } - return &manifestSchema1{ - Name: name, - Tag: tag, - Architecture: architecture, - FSLayers: fsLayers, - History: history, - SchemaVersion: 1, - } -} - -func (m *manifestSchema1) serialize() ([]byte, error) { - // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. - unsigned, err := json.Marshal(*m) +func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema1FromManifest(manifestBlob) if err != nil { return nil, err } - return manifest.AddDummyV2S1Signature(unsigned) + return &manifestSchema1{m: m}, nil +} + +// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. +func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest { + return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)} +} + +func (m *manifestSchema1) serialize() ([]byte, error) { + return m.m.Serialize() } func (m *manifestSchema1) manifestMIMEType() string { @@ -104,7 +39,7 @@ func (m *manifestSchema1) manifestMIMEType() string { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestSchema1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{} + return m.m.ConfigInfo() } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. 
@@ -128,11 +63,7 @@ func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestSchema1) LayerInfos() []types.BlobInfo { - layers := make([]types.BlobInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} - } - return layers + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -153,22 +84,11 @@ func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) } else { tag = "" } - return m.Name != name || m.Tag != tag + return m.m.Name != name || m.m.Tag != tag } func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) { - v1 := &v1Image{} - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - Tag: m.Tag, - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - }, nil + return m.m.Inspect(nil) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -181,25 +101,18 @@ func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m + copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} if options.LayerInfos != nil { - // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. - if len(copy.FSLayers) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos)) - } - for i, info := range options.LayerInfos { - // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, - // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. - // So, we don't bother recomputing the IDs in m.History.V1Compatibility. - copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } if options.EmbeddedDockerReference != nil { - copy.Name = reference.Path(options.EmbeddedDockerReference) + copy.m.Name = reference.Path(options.EmbeddedDockerReference) if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { - copy.Tag = tagged.Tag() + copy.m.Tag = tagged.Tag() } else { - copy.Tag = "" + copy.m.Tag = "" } } @@ -209,7 +122,21 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, // handle conversions between them by doing nothing. 
case manifest.DockerV2Schema2MediaType: - return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) + m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) + if err != nil { + return nil, err + } + return memoryImageFromManifest(m2), nil + case imgspecv1.MediaTypeImageManifest: + // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest + m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) + if err != nil { + return nil, err + } + return m2.UpdatedImage(types.ManifestUpdateOptions{ + ManifestMIMEType: imgspecv1.MediaTypeImageManifest, + InformationOnly: options.InformationOnly, + }) default: return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType) } @@ -217,102 +144,32 @@ return memoryImageFromManifest(&copy) } -// fixManifestLayers, after validating the supplied manifest -// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History), -// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates, -// both from manifest.History and manifest.FSLayers). -// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries -// (for Dockerfile operations which change the configuration but not the filesystem). -func fixManifestLayers(manifest *manifestSchema1) error { - type imageV1 struct { - ID string - Parent string - } - // Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History) - imgs := make([]*imageV1, len(manifest.FSLayers)) - for i := range manifest.FSLayers { - img := &imageV1{} - - if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil { - return err - } - - imgs[i] = img - if err := validateV1ID(img.ID); err != nil { - return err - } - } - if imgs[len(imgs)-1].Parent != "" { - return errors.New("Invalid parent ID in the base layer of the image") - } - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - var lastID string - for _, img := range imgs { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - // backwards loop so that we keep the remaining indexes after removing items - for i := len(imgs) - 2; i >= 0; i-- { - if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue - manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...) - manifest.History = append(manifest.History[:i], manifest.History[i+1:]...) - } else if imgs[i].Parent != imgs[i+1].ID { - return errors.Errorf("Invalid parent ID.
Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) - } - } - return nil -} - -func validateV1ID(id string) error { - if ok := validHex.MatchString(id); !ok { - return errors.Errorf("image ID %q is invalid", id) - } - return nil -} - // Based on github.com/docker/docker/distribution/pull_v2.go -func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) { - if len(m.History) == 0 { +func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) { + if len(m.m.History) == 0 { // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) } - if len(m.History) != len(m.FSLayers) { - return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers)) + if len(m.m.History) != len(m.m.FSLayers) { + return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers)) } - if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) { - return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers)) + if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) } - if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) { - return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers)) + if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) } - rootFS := rootFS{ - Type: "layers", - DiffIDs: []digest.Digest{}, - BaseLayer: "", - } - var layers []descriptor - history := make([]imageHistory, len(m.History)) - for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- { - v2Index := (len(m.History) - 1) - v1Index + // Build a list of the diffIDs for the non-empty layers. 
+ diffIDs := []digest.Digest{} + var layers []manifest.Schema2Descriptor + for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.History) - 1) - v1Index - var v1compat v1Compatibility - if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil { + var v1compat manifest.Schema1V1Compatibility + if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil { return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index) } - history[v2Index] = imageHistory{ - Created: v1compat.Created, - Author: v1compat.Author, - CreatedBy: strings.Join(v1compat.ContainerConfig.Cmd, " "), - Comment: v1compat.Comment, - EmptyLayer: v1compat.ThrowAway, - } - if !v1compat.ThrowAway { var size int64 if uploadedLayerInfos != nil { @@ -322,54 +179,23 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl if layerDiffIDs != nil { d = layerDiffIDs[v2Index] } - layers = append(layers, descriptor{ + layers = append(layers, manifest.Schema2Descriptor{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: size, - Digest: m.FSLayers[v1Index].BlobSum, + Digest: m.m.FSLayers[v1Index].BlobSum, }) - rootFS.DiffIDs = append(rootFS.DiffIDs, d) + diffIDs = append(diffIDs, d) } } - configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history) + configJSON, err := m.m.ToSchema2(diffIDs) if err != nil { return nil, err } - configDescriptor := descriptor{ + configDescriptor := manifest.Schema2Descriptor{ MediaType: "application/vnd.docker.container.image.v1+json", Size: int64(len(configJSON)), Digest: digest.FromBytes(configJSON), } - m2 := manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers) - return memoryImageFromManifest(m2), nil -} - -func configJSONFromV1Config(v1ConfigJSON []byte, rootFS rootFS, history []imageHistory) ([]byte, error) { - // github.com/docker/docker/image/v1/imagev1.go:MakeConfigFromV1Config unmarshals and re-marshals the input if docker_version is < 1.8.3 to remove blank fields; - // we don't do that here. FIXME? Should we? AFAICT it would only affect the digest value of the schema2 manifest, and we don't particularly need that to be - // a consistently reproducible value. - - // Preserve everything we don't specifically know about. - // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) - rawContents := map[string]*json.RawMessage{} - if err := json.Unmarshal(v1ConfigJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! 
- return nil, err - } - - delete(rawContents, "id") - delete(rawContents, "parent") - delete(rawContents, "Size") - delete(rawContents, "parent_id") - delete(rawContents, "layer_id") - delete(rawContents, "throwaway") - - updates := map[string]interface{}{"rootfs": rootFS, "history": history} - for field, value := range updates { - encoded, err := json.Marshal(value) - if err != nil { - return nil, err - } - rawContents[field] = (*json.RawMessage)(&encoded) - } - return json.Marshal(rawContents) + return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil } diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go index 8cc3c495..b43bc17c 100644 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ b/vendor/github.com/containers/image/image/docker_schema2.go @@ -29,54 +29,44 @@ var gzippedEmptyLayer = []byte{ // gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") -type descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} - type manifestSchema2 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - ConfigDescriptor descriptor `json:"config"` - LayersDescriptors []descriptor `json:"layers"` + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. + m *manifest.Schema2 } -func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { - v2s2 := manifestSchema2{src: src} - if err := json.Unmarshal(manifest, &v2s2); err != nil { +func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema2FromManifest(manifestBlob) + if err != nil { return nil, err } - return &v2s2, nil + return &manifestSchema2{ + src: src, + m: m, + }, nil } // manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: -func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest { +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest { return &manifestSchema2{ - src: src, - configBlob: configBlob, - SchemaVersion: 2, - MediaType: manifest.DockerV2Schema2MediaType, - ConfigDescriptor: config, - LayersDescriptors: layers, + src: src, + configBlob: configBlob, + m: manifest.Schema2FromComponents(config, layers), } } func (m *manifestSchema2) serialize() ([]byte, error) { - return json.Marshal(*m) + return m.m.Serialize() } func (m *manifestSchema2) manifestMIMEType() string { - return m.MediaType + return m.m.MediaType } // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. 
func (m *manifestSchema2) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} + return m.m.ConfigInfo() } // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about @@ -105,9 +95,9 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") } stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.ConfigDescriptor.Digest, - Size: m.ConfigDescriptor.Size, - URLs: m.ConfigDescriptor.URLs, + Digest: m.m.ConfigDescriptor.Digest, + Size: m.m.ConfigDescriptor.Size, + URLs: m.m.ConfigDescriptor.URLs, }) if err != nil { return nil, err @@ -118,8 +108,8 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { return nil, err } computedDigest := digest.FromBytes(blob) - if computedDigest != m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) + if computedDigest != m.m.ConfigDescriptor.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) } m.configBlob = blob } @@ -130,15 +120,7 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestSchema2) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{ - Digest: layer.Digest, - Size: layer.Size, - URLs: layer.URLs, - }) - } - return blobs + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -149,21 +131,18 @@ func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) } func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) { - config, err := m.ConfigBlob() - if err != nil { - return nil, err + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob() + if err != nil { + return nil, err + } + return config, nil } - v1 := &v1Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - }, nil + return m.m.Inspect(getter) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -176,17 +155,14 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m // NOTE: This is not a deep copy, it still shares slices etc. + copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. 
+ src: m.src, + configBlob: m.configBlob, + m: manifest.Schema2Clone(m.m), + } if options.LayerInfos != nil { - if len(copy.LayersDescriptors) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) - } - copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos)) - for i, info := range options.LayerInfos { - copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType - copy.LayersDescriptors[i].Digest = info.Digest - copy.LayersDescriptors[i].Size = info.Size - copy.LayersDescriptors[i].URLs = info.URLs + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. @@ -204,6 +180,15 @@ return memoryImageFromManifest(&copy) } +func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { + return imgspecv1.Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) { configOCI, err := m.OCIConfig() if err != nil { return nil, err } - config := descriptorOCI1{ - descriptor: descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - }, + config := imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + Size: int64(len(configOCIBytes)), + Digest: digest.FromBytes(configOCIBytes), } - layers := make([]descriptorOCI1, len(m.LayersDescriptors)) + layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) for idx := range layers { - layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]} - if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { + layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) + if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable } else { // we assume layers are gzip'ed because docker v2s2 only deals with @@ -244,14 +227,14 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) if err != nil { return nil, err } - imageConfig := &image{} + imageConfig := &manifest.Schema2Image{} if err := json.Unmarshal(configBytes, imageConfig); err != nil { return nil, err } // Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
- fsLayers := make([]fsLayersSchema1, len(imageConfig.History)) - history := make([]historySchema1, len(imageConfig.History)) + fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) + history := make([]manifest.Schema1History, len(imageConfig.History)) nonemptyLayerIndex := 0 var parentV1ID string // Set in the loop v1ID := "" @@ -279,10 +262,10 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) } blobDigest = gzippedEmptyLayerDigest } else { - if nonemptyLayerIndex >= len(m.LayersDescriptors) { - return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors)) + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) } - blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest nonemptyLayerIndex++ } @@ -293,7 +276,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) } v1ID = v - fakeImage := v1Compatibility{ + fakeImage := manifest.Schema1V1Compatibility{ ID: v1ID, Parent: parentV1ID, Comment: historyEntry.Comment, @@ -307,8 +290,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) } - fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest} - history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)} + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} // Note that parentV1ID of the top layer is preserved when exiting this loop } diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go index 75c9e711..cdd4233f 100644 --- a/vendor/github.com/containers/image/image/manifest.go +++ b/vendor/github.com/containers/image/image/manifest.go @@ -1,57 +1,14 @@ package image import ( - "time" + "fmt" "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" - "github.com/containers/image/pkg/strslice" "github.com/containers/image/types" - "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) -type config struct { - Cmd strslice.StrSlice - Labels map[string]string -} - -type v1Image struct { - ID string `json:"id,omitempty"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig *config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *config `json:"config,omitempty"` - // Architecture is the hardware that the image is build and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` -} - -type image struct { - v1Image - History []imageHistory `json:"history,omitempty"` - RootFS *rootFS `json:"rootfs,omitempty"` -} - -type imageHistory struct { - Created time.Time `json:"created"` - Author string `json:"author,omitempty"` - CreatedBy string `json:"created_by,omitempty"` - Comment string `json:"comment,omitempty"` - EmptyLayer 
bool `json:"empty_layer,omitempty"` -} - -type rootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` - BaseLayer string `json:"base_layer,omitempty"` -} - // genericManifest is an interface for parsing, modifying image manifests and related data. // Note that the public methods are intended to be a subset of types.Image // so that embedding a genericManifest into structs works. @@ -87,43 +44,24 @@ type genericManifest interface { UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) } -func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { - switch mt { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json": +// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. +// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. +func manifestInstanceFromBlob(ctx *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { + switch manifest.NormalizedMIMEType(mt) { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: return manifestSchema1FromManifest(manblob) case imgspecv1.MediaTypeImageManifest: return manifestOCI1FromManifest(src, manblob) case manifest.DockerV2Schema2MediaType: return manifestSchema2FromManifest(src, manblob) case manifest.DockerV2ListMediaType: - return manifestSchema2FromManifestList(src, manblob) - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return manifestSchema1FromManifest(manblob) + return manifestSchema2FromManifestList(ctx, src, manblob) + default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. 
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) } } // inspectManifest is an implementation of types.Image.Inspect func inspectManifest(m genericManifest) (*types.ImageInspectInfo, error) { - info, err := m.imageInspectInfo() - if err != nil { - return nil, err - } - layers := m.LayerInfos() - info.Layers = make([]string, len(layers)) - for i, layer := range layers { - info.Layers[i] = layer.Digest.String() - } - return info, nil + return m.imageInspectInfo() } diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go index 62995f61..4639c49a 100644 --- a/vendor/github.com/containers/image/image/memory.go +++ b/vendor/github.com/containers/image/image/memory.go @@ -33,11 +33,6 @@ func (i *memoryImage) Reference() types.ImageReference { return nil } -// Close removes resources associated with an initialized UnparsedImage, if any. -func (i *memoryImage) Close() error { - return nil -} - // Size returns the size of the image as stored, if known, or -1 if not. func (i *memoryImage) Size() (int64, error) { return -1, nil @@ -67,7 +62,9 @@ func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) { return inspectManifest(i.genericManifest) } -// IsMultiImage returns true if the image's manifest is a list of images, false otherwise. -func (i *memoryImage) IsMultiImage() bool { - return false +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (i *memoryImage) LayerInfosForCopy() []types.BlobInfo { + return nil } diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go index 7ea5d2bb..3c03e49b 100644 --- a/vendor/github.com/containers/image/image/oci.go +++ b/vendor/github.com/containers/image/image/oci.go @@ -12,41 +12,34 @@ import ( "github.com/pkg/errors" ) -type descriptorOCI1 struct { - descriptor - Annotations map[string]string `json:"annotations,omitempty"` -} - type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - SchemaVersion int `json:"schemaVersion"` - ConfigDescriptor descriptorOCI1 `json:"config"` - LayersDescriptors []descriptorOCI1 `json:"layers"` - Annotations map[string]string `json:"annotations,omitempty"` + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of m.Config. 
+ m *manifest.OCI1 } -func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { - oci := manifestOCI1{src: src} - if err := json.Unmarshal(manifest, &oci); err != nil { +func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.OCI1FromManifest(manifestBlob) + if err != nil { return nil, err } - return &oci, nil + return &manifestOCI1{ + src: src, + m: m, + }, nil } // manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest { +func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { return &manifestOCI1{ - src: src, - configBlob: configBlob, - SchemaVersion: 2, - ConfigDescriptor: config, - LayersDescriptors: layers, + src: src, + configBlob: configBlob, + m: manifest.OCI1FromComponents(config, layers), } } func (m *manifestOCI1) serialize() ([]byte, error) { - return json.Marshal(*m) + return m.m.Serialize() } func (m *manifestOCI1) manifestMIMEType() string { @@ -56,7 +49,7 @@ func (m *manifestOCI1) manifestMIMEType() string { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestOCI1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, Annotations: m.ConfigDescriptor.Annotations} + return m.m.ConfigInfo() } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. @@ -67,9 +60,9 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") } stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.ConfigDescriptor.Digest, - Size: m.ConfigDescriptor.Size, - URLs: m.ConfigDescriptor.URLs, + Digest: m.m.Config.Digest, + Size: m.m.Config.Size, + URLs: m.m.Config.URLs, }) if err != nil { return nil, err @@ -80,8 +73,8 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) { return nil, err } computedDigest := digest.FromBytes(blob) - if computedDigest != m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) + if computedDigest != m.m.Config.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) } m.configBlob = blob } @@ -107,11 +100,7 @@ func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations}) - } - return blobs + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. 
@@ -122,21 +111,18 @@ func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) boo } func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) { - config, err := m.ConfigBlob() - if err != nil { - return nil, err + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob() + if err != nil { + return nil, err + } + return config, nil } - v1 := &v1Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - }, nil + return m.m.Inspect(getter) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -149,17 +135,14 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m // NOTE: This is not a deep copy, it still shares slices etc. + copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. + src: m.src, + configBlob: m.configBlob, + m: manifest.OCI1Clone(m.m), + } if options.LayerInfos != nil { - if len(copy.LayersDescriptors) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) - } - copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos)) - for i, info := range options.LayerInfos { - copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType - copy.LayersDescriptors[i].Digest = info.Digest - copy.LayersDescriptors[i].Size = info.Size - copy.LayersDescriptors[i].Annotations = info.Annotations + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. @@ -175,17 +158,26 @@ return memoryImageFromManifest(&copy) } +func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { + return manifest.Schema2Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { // Create a copy of the descriptor. - config := m.ConfigDescriptor.descriptor + config := schema2DescriptorFromOCI1Descriptor(m.m.Config) // The only difference between OCI and DockerSchema2 is the mediatypes. The // media type of the manifest is handled by manifestSchema2FromComponents.
config.MediaType = manifest.DockerV2Schema2ConfigMediaType - layers := make([]descriptor, len(m.LayersDescriptors)) + layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) for idx := range layers { - layers[idx] = m.LayersDescriptors[idx].descriptor + layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType } diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go index ef35b3c3..3477f341 100644 --- a/vendor/github.com/containers/image/image/sourced.go +++ b/vendor/github.com/containers/image/image/sourced.go @@ -4,12 +4,22 @@ package image import ( - "github.com/containers/image/manifest" "github.com/containers/image/types" ) -// FromSource returns a types.Image implementation for source. -// The caller must call .Close() on the returned Image. +// imageCloser implements types.ImageCloser, perhaps allowing simple users +// to use a single object without having to keep a reference to a types.ImageSource +// only to call types.ImageSource.Close(). +type imageCloser struct { + types.Image + src types.ImageSource +} + +// FromSource returns a types.ImageCloser implementation for the default instance of source. +// If source is a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate image instance. +// +// The caller must call .Close() on the returned ImageCloser. // // FromSource “takes ownership” of the input ImageSource and will call src.Close() // when the image is closed. (This does not prevent callers from using both the @@ -18,8 +28,19 @@ import ( // // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. -func FromSource(src types.ImageSource) (types.Image, error) { - return FromUnparsedImage(UnparsedFromSource(src)) +func FromSource(ctx *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { + img, err := FromUnparsedImage(ctx, UnparsedInstance(src, nil)) + if err != nil { + return nil, err + } + return &imageCloser{ + Image: img, + src: src, + }, nil +} + +func (ic *imageCloser) Close() error { + return ic.src.Close() } // sourcedImage is a general set of utilities for working with container images, @@ -38,27 +59,22 @@ type sourcedImage struct { } // FromUnparsedImage returns a types.Image implementation for unparsed. -// The caller must call .Close() on the returned Image. +// If unparsed represents a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate single image. // -// FromSource “takes ownership” of the input UnparsedImage and will call uparsed.Close() -// when the image is closed. (This does not prevent callers from using both the -// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to -// keep a reference to the Image.) -func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) { +// The Image must not be used after the underlying ImageSource is Close()d. +func FromUnparsedImage(ctx *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: // we want to be able to use unparsed.src.
We could make that an explicit interface, but, well, // this is the only UnparsedImage implementation around, anyway. - // Also, we do not explicitly implement types.Image.Close; we let the implementation fall through to - // unparsed.Close. - // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). manifestBlob, manifestMIMEType, err := unparsed.Manifest() if err != nil { return nil, err } - parsedManifest, err := manifestInstanceFromBlob(unparsed.src, manifestBlob, manifestMIMEType) + parsedManifest, err := manifestInstanceFromBlob(ctx, unparsed.src, manifestBlob, manifestMIMEType) if err != nil { return nil, err } @@ -85,6 +101,6 @@ func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) { return inspectManifest(i.genericManifest) } -func (i *sourcedImage) IsMultiImage() bool { - return i.manifestMIMEType == manifest.DockerV2ListMediaType +func (i *sourcedImage) LayerInfosForCopy() []types.BlobInfo { + return i.UnparsedImage.LayerInfosForCopy() } diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go index 483cfd04..aff06d8a 100644 --- a/vendor/github.com/containers/image/image/unparsed.go +++ b/vendor/github.com/containers/image/image/unparsed.go @@ -11,8 +11,10 @@ import ( ) // UnparsedImage implements types.UnparsedImage . +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. type UnparsedImage struct { src types.ImageSource + instanceDigest *digest.Digest cachedManifest []byte // A private cache for Manifest(); nil if not yet known. // A private cache for Manifest(), may be the empty string if guessing failed. // Valid iff cachedManifest is not nil. @@ -20,49 +22,41 @@ type UnparsedImage struct { cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. } -// UnparsedFromSource returns a types.UnparsedImage implementation for source. -// The caller must call .Close() on the returned UnparsedImage. +// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). // -// UnparsedFromSource “takes ownership” of the input ImageSource and will call src.Close() -// when the image is closed. (This does not prevent callers from using both the -// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to -// keep a reference to the UnparsedImage.) -func UnparsedFromSource(src types.ImageSource) *UnparsedImage { - return &UnparsedImage{src: src} +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { + return &UnparsedImage{ + src: src, + instanceDigest: instanceDigest, + } } // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. func (i *UnparsedImage) Reference() types.ImageReference { + // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. 
return i.src.Reference() } -// Close removes resources associated with an initialized UnparsedImage, if any. -func (i *UnparsedImage) Close() error { - return i.src.Close() -} - // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. func (i *UnparsedImage) Manifest() ([]byte, string, error) { if i.cachedManifest == nil { - m, mt, err := i.src.GetManifest() + m, mt, err := i.src.GetManifest(i.instanceDigest) if err != nil { return nil, "", err } // ImageSource.GetManifest does not do digest verification, but we do; // this immediately protects also any user of types.Image. - ref := i.Reference().DockerReference() - if ref != nil { - if canonical, ok := ref.(reference.Canonical); ok { - digest := digest.Digest(canonical.Digest()) - matches, err := manifest.MatchesDigest(m, digest) - if err != nil { - return nil, "", errors.Wrap(err, "Error computing manifest digest") - } - if !matches { - return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) - } + if digest, haveDigest := i.expectedManifestDigest(); haveDigest { + matches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return nil, "", errors.Wrap(err, "Error computing manifest digest") + } + if !matches { + return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) } } @@ -72,10 +66,26 @@ return i.cachedManifest, i.cachedManifestMIMEType, nil } +// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known. +// The bool return value seems redundant with digest != ""; it is used explicitly +// to refuse (unexpected) situations when the digest exists but is "". +func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { + if i.instanceDigest != nil { + return *i.instanceDigest, true + } + ref := i.Reference().DockerReference() + if ref != nil { + if canonical, ok := ref.(reference.Canonical); ok { + return canonical.Digest(), true + } + } + return "", false +} + // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { if i.cachedSignatures == nil { - sigs, err := i.src.GetSignatures(ctx) + sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) if err != nil { return nil, err } @@ -83,3 +93,10 @@ } return i.cachedSignatures, nil } + +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (i *UnparsedImage) LayerInfosForCopy() []types.BlobInfo { + return i.src.LayerInfosForCopy() +} diff --git a/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go new file mode 100644 index 00000000..a28020ed --- /dev/null +++ b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go @@ -0,0 +1,19 @@ +package tmpdir + +import ( + "os" + "runtime" +) + +// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
+// On non-Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp +// which on systemd-based systems could be the unsuitable tmpfs filesystem. +func TemporaryDirectoryForBigFiles() string { + var temporaryDirectoryForBigFiles string + if runtime.GOOS == "windows" { + temporaryDirectoryForBigFiles = os.TempDir() + } else { + temporaryDirectoryForBigFiles = "/var/tmp" + } + return temporaryDirectoryForBigFiles +} diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go new file mode 100644 index 00000000..b1c1cfe9 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/docker_schema1.go @@ -0,0 +1,310 @@ +package manifest + +import ( + "encoding/json" + "regexp" + "strings" + "time" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" + "github.com/docker/docker/api/types/versions" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. +type Schema1FSLayers struct { + BlobSum digest.Digest `json:"blobSum"` +} + +// Schema1History is an entry of the "history" array in docker/distribution schema 1. +type Schema1History struct { + V1Compatibility string `json:"v1Compatibility"` +} + +// Schema1 is a manifest in docker/distribution schema 1. +type Schema1 struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []Schema1FSLayers `json:"fsLayers"` + History []Schema1History `json:"history"` + SchemaVersion int `json:"schemaVersion"` +} + +// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. +type Schema1V1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` +} + +// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. +// (NOTE: The instance is not necessarily a literal representation of the original blob, +// layers with duplicate IDs are eliminated.) +func Schema1FromManifest(manifest []byte) (*Schema1, error) { + s1 := Schema1{} + if err := json.Unmarshal(manifest, &s1); err != nil { + return nil, err + } + if s1.SchemaVersion != 1 { + return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) + } + if len(s1.FSLayers) != len(s1.History) { + return nil, errors.New("length of history not equal to number of layers") + } + if len(s1.FSLayers) == 0 { + return nil, errors.New("no FSLayers in manifest") + } + if err := s1.fixManifestLayers(); err != nil { + return nil, err + } + return &s1, nil } + +// Schema1FromComponents creates a Schema1 manifest instance from the supplied data. +func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 { + var name, tag string + if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+ name = reference.Path(ref) + if tagged, ok := ref.(reference.NamedTagged); ok { + tag = tagged.Tag() + } + } + return &Schema1{ + Name: name, + Tag: tag, + Architecture: architecture, + FSLayers: fsLayers, + History: history, + SchemaVersion: 1, + } +} + +// Schema1Clone creates a copy of the supplied Schema1 manifest. +func Schema1Clone(src *Schema1) *Schema1 { + copy := *src + return &copy +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *Schema1) ConfigInfo() types.BlobInfo { + return types.BlobInfo{} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *Schema1) LayerInfos() []types.BlobInfo { + layers := make([]types.BlobInfo, len(m.FSLayers)) + for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) + layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} + } + return layers +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. + if len(m.FSLayers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) + } + for i, info := range layerInfos { + // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, + // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. + // So, we don't bother recomputing the IDs in m.History.V1Compatibility. + m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema1) Serialize() ([]byte, error) { + // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. + unsigned, err := json.Marshal(*m) + if err != nil { + return nil, err + } + return AddDummyV2S1Signature(unsigned) +} + +// fixManifestLayers, after validating the supplied manifest +// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), +// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, +// both from m.History and m.FSLayers). +// Note that even after this succeeds, m.FSLayers may contain duplicate entries +// (for Dockerfile operations which change the configuration but not the filesystem).
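The index arithmetic in `LayerInfos` above is the schema 1 quirk worth calling out: `fsLayers` are stored newest-first, so the method reverses them into the root-first order the rest of the library expects. A runnable sketch, assuming the vendored package is importable as `github.com/containers/image/manifest` (digests are placeholders):

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	"github.com/opencontainers/go-digest"
)

func main() {
	// fsLayers as they appear in a schema 1 manifest: newest layer first.
	m := manifest.Schema1{
		FSLayers: []manifest.Schema1FSLayers{
			{BlobSum: digest.FromString("newest")},
			{BlobSum: digest.FromString("middle")},
			{BlobSum: digest.FromString("root")},
		},
	}
	// LayerInfos reverses the order: the root layer prints first, and
	// every Size is -1 because schema 1 does not record blob sizes.
	for _, info := range m.LayerInfos() {
		fmt.Println(info.Digest, info.Size)
	}
}
```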
+func (m *Schema1) fixManifestLayers() error { + type imageV1 struct { + ID string + Parent string + } + // Per the specification, we can assume that len(m.FSLayers) == len(m.History) + imgs := make([]*imageV1, len(m.FSLayers)) + for i := range m.FSLayers { + img := &imageV1{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := validateV1ID(img.ID); err != nil { + return err + } + } + if imgs[len(imgs)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image") + } + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) + } + } + return nil +} + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return errors.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + s1 := &Schema2V1Image{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { + return nil, err + } + return &types.ImageInspectInfo{ + Tag: m.Tag, + Created: s1.Created, + DockerVersion: s1.DockerVersion, + Labels: make(map[string]string), + Architecture: s1.Architecture, + Os: s1.OS, + Layers: LayerInfosToStrings(m.LayerInfos()), + }, nil +} + +// ToSchema2 builds a schema2-style configuration blob using the supplied diffIDs. +func (m *Schema1) ToSchema2(diffIDs []digest.Digest) ([]byte, error) { + // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields + // that aren't directly comparable using info from the manifest. + if len(m.History) == 0 { + return nil, errors.New("image has no layers") + } + s2 := struct { + Schema2Image + ID string `json:"id,omitempty"` + Parent string `json:"parent,omitempty"` + ParentID string `json:"parent_id,omitempty"` + LayerID string `json:"layer_id,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + Size int64 `json:",omitempty"` + }{} + config := []byte(m.History[0].V1Compatibility) + err := json.Unmarshal(config, &s2) + if err != nil { + return nil, errors.Wrapf(err, "error decoding configuration") + } + // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, + // adding some fields that aren't "omitempty". + if s2.DockerVersion != "" && versions.LessThan(s2.DockerVersion, "1.8.3") { + config, err = json.Marshal(&s2) + if err != nil { + return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s2) + } + } + // Build the history. 
+ convertedHistory := []Schema2History{} + for _, h := range m.History { + compat := Schema1V1Compatibility{} + if err := json.Unmarshal([]byte(h.V1Compatibility), &compat); err != nil { + return nil, errors.Wrapf(err, "error decoding history information") + } + hitem := Schema2History{ + Created: compat.Created, + CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), + Author: compat.Author, + Comment: compat.Comment, + EmptyLayer: compat.ThrowAway, + } + convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + } + // Build the rootfs information. We need the decompressed sums that we've been + // calculating to fill in the DiffIDs. It's expected (but not enforced by us) + // that the number of diffIDs corresponds to the number of non-EmptyLayer + // entries in the history. + rootFS := &Schema2RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + // And now for some raw manipulation. + raw := make(map[string]*json.RawMessage) + err = json.Unmarshal(config, &raw) + if err != nil { + return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s2) + } + // Drop some fields. + delete(raw, "id") + delete(raw, "parent") + delete(raw, "parent_id") + delete(raw, "layer_id") + delete(raw, "throwaway") + delete(raw, "Size") + // Add the history and rootfs information. + rootfs, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + } + rawRootfs := json.RawMessage(rootfs) + raw["rootfs"] = &rawRootfs + history, err := json.Marshal(convertedHistory) + if err != nil { + return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) + } + rawHistory := json.RawMessage(history) + raw["history"] = &rawHistory + // Encode the result. + config, err = json.Marshal(raw) + if err != nil { + return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s2, err) + } + return config, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { + image, err := m.ToSchema2(diffIDs) + if err != nil { + return "", err + } + return digest.FromBytes(image).Hex(), nil +} diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go new file mode 100644 index 00000000..ef82ffc2 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/docker_schema2.go @@ -0,0 +1,251 @@ +package manifest + +import ( + "encoding/json" + "time" + + "github.com/containers/image/pkg/strslice" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} + +// Schema2 is a manifest in docker/distribution schema 2. +type Schema2 struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + ConfigDescriptor Schema2Descriptor `json:"config"` + LayersDescriptors []Schema2Descriptor `json:"layers"` +} + +// Schema2Port is a Port, a string containing port number and protocol in the +// format "80/tcp", from docker/go-connections/nat. +type Schema2Port string + +// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from +// docker/go-connections/nat.
+type Schema2PortSet map[Schema2Port]struct{} + +// Schema2HealthConfig is a HealthConfig, which holds configuration settings +// for the HEALTHCHECK feature, from docker/docker/api/types/container. +type Schema2HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Schema2Config is a Config in docker/docker/api/types/container. +type Schema2Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also supports user:group + AttachStdin bool // Attach the standard input, making user interaction possible + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the first attached client disconnects. + Env []string // List of environment variables to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in which the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// Schema2V1Image is a V1Image in docker/docker/image.
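For reviewers unfamiliar with the HEALTHCHECK encoding, the four `Test` forms listed above look like this in Go; a sketch assuming the vendored `github.com/containers/image/manifest` import path, with illustrative probe commands:

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	// The four Test forms described above; the probe commands are illustrative.
	inherit := manifest.Schema2HealthConfig{Test: []string{}}                  // inherit the default healthcheck
	disable := manifest.Schema2HealthConfig{Test: []string{"NONE"}}            // disable healthchecking
	direct := manifest.Schema2HealthConfig{Test: []string{"CMD", "true"}}      // exec arguments directly
	shell := manifest.Schema2HealthConfig{Test: []string{"CMD-SHELL", "true"}} // run via the system's default shell
	for _, hc := range []manifest.Schema2HealthConfig{inherit, disable, direct, shell} {
		fmt.Println(hc.Test)
	}
}
```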
+type Schema2V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig Schema2Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *Schema2Config `json:"config,omitempty"` + // Architecture is the hardware that the image is built and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image. +type Schema2RootFS struct { + Type string `json:"type"` + DiffIDs []digest.Digest `json:"diff_ids,omitempty"` +} + +// Schema2History stores build commands that were used to create an image, from docker/docker/image. +type Schema2History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Schema2Image is an Image in docker/docker/image. +type Schema2Image struct { + Schema2V1Image + Parent digest.Digest `json:"parent,omitempty"` + RootFS *Schema2RootFS `json:"rootfs,omitempty"` + History []Schema2History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID digest.Digest +} + +// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. +func Schema2FromManifest(manifest []byte) (*Schema2, error) { + s2 := Schema2{} + if err := json.Unmarshal(manifest, &s2); err != nil { + return nil, err + } + return &s2, nil +} + +// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 { + return &Schema2{ + SchemaVersion: 2, + MediaType: DockerV2Schema2MediaType, + ConfigDescriptor: config, + LayersDescriptors: layers, + } +} + +// Schema2Clone creates a copy of the supplied Schema2 manifest. +func Schema2Clone(src *Schema2) *Schema2 { + copy := *src + return &copy +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *Schema2) ConfigInfo() types.BlobInfo { + return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *Schema2) LayerInfos() []types.BlobInfo { + blobs := []types.BlobInfo{} + for _, layer := range m.LayersDescriptors { + blobs = append(blobs, types.BlobInfo{ + Digest: layer.Digest, + Size: layer.Size, + URLs: layer.URLs, + }) + } + return blobs +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.LayersDescriptors) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) + } + original := m.LayersDescriptors + m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) + for i, info := range layerInfos { + m.LayersDescriptors[i].MediaType = original[i].MediaType + m.LayersDescriptors[i].Digest = info.Digest + m.LayersDescriptors[i].Size = info.Size + m.LayersDescriptors[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema2) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + s2 := &Schema2Image{} + if err := json.Unmarshal(config, s2); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: "", + Created: s2.Created, + DockerVersion: s2.DockerVersion, + Architecture: s2.Architecture, + Os: s2.OS, + Layers: LayerInfosToStrings(m.LayerInfos()), + } + if s2.Config != nil { + i.Labels = s2.Config.Labels + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents.
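A short sketch of how these constructors compose, assuming the vendored import paths; `Schema2FromComponents` fills in the schema version and media type, and `Serialize` emits the JSON blob (digests and sizes are placeholders):

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	"github.com/opencontainers/go-digest"
)

func main() {
	config := manifest.Schema2Descriptor{
		MediaType: "application/vnd.docker.container.image.v1+json",
		Size:      1234, // placeholder
		Digest:    digest.FromString("placeholder config blob"),
	}
	layers := []manifest.Schema2Descriptor{{
		MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
		Size:      5678, // placeholder
		Digest:    digest.FromString("placeholder layer blob"),
	}}
	m := manifest.Schema2FromComponents(config, layers)
	blob, err := m.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", blob) // schemaVersion 2 and mediaType are filled in
}
```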
+func (m *Schema2) ImageID([]digest.Digest) (string, error) { + if err := m.ConfigDescriptor.Digest.Validate(); err != nil { + return "", err + } + return m.ConfigDescriptor.Digest.Hex(), nil +} diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go index 605bab1d..2bc801d8 100644 --- a/vendor/github.com/containers/image/manifest/manifest.go +++ b/vendor/github.com/containers/image/manifest/manifest.go @@ -2,7 +2,9 @@ package manifest import ( "encoding/json" + "fmt" + "github.com/containers/image/types" "github.com/docker/libtrust" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -38,6 +40,39 @@ var DefaultRequestedManifestMIMETypes = []string{ DockerV2ListMediaType, } +// Manifest is an interface for parsing and modifying image manifests in isolation. +// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members +// directly. +// +// See types.Image for functionality not limited to manifests, including format conversions and config parsing. +// This interface is similar to, but not strictly equivalent to, the corresponding methods in types.Image. +type Manifest interface { + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + ConfigInfo() types.BlobInfo + // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []types.BlobInfo + // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) + UpdateLayerInfos(layerInfos []types.BlobInfo) error + + // ImageID computes an ID which can uniquely identify this image by its contents, irrespective + // of which (of possibly more than one simultaneously valid) reference was used to locate the + // image, and unchanged by whether or how the layers are compressed. The result takes the form + // of the hexadecimal portion of a digest.Digest. + ImageID(diffIDs []digest.Digest) (string, error) + + // Inspect returns various information for (skopeo inspect) parsed from the manifest, + // incorporating information from a configuration blob returned by configGetter, if + // the underlying image format is expected to include a configuration blob. + Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) + + // Serialize returns the manifest in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! + Serialize() ([]byte, error) +} + // GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. // FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, // but we may not have such metadata available (e.g. when the manifest is a local file).
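To make the intent of the interface concrete, here is a hedged sketch of a format-agnostic consumer: it relies only on the methods declared above, so it works identically for Schema1, Schema2, and OCI1 values (a concrete instance could also come from the `FromBlob` helper added in the next hunk; the import path is the vendored one):

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

// summarize inspects a manifest without caring which concrete format backs it.
func summarize(m manifest.Manifest) error {
	if cfg := m.ConfigInfo(); cfg.Digest != "" {
		fmt.Println("config:", cfg.Digest, cfg.Size)
	}
	for _, layer := range m.LayerInfos() {
		fmt.Println("layer:", layer.Digest, layer.Size) // root layer first; Size may be -1
	}
	blob, err := m.Serialize()
	if err != nil {
		return err
	}
	fmt.Println("serialized bytes:", len(blob))
	return nil
}

func main() {
	m := manifest.Schema2FromComponents(manifest.Schema2Descriptor{}, nil)
	_ = summarize(m) // the toy config digest is empty, so only the byte count prints
}
```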
@@ -142,3 +177,62 @@ func AddDummyV2S1Signature(manifest []byte) ([]byte, error) { } return js.PrettySignature("signatures") } + +// MIMETypeIsMultiImage returns true if mimeType is a list of images +func MIMETypeIsMultiImage(mimeType string) bool { + return mimeType == DockerV2ListMediaType +} + +// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, +// centralizing various workarounds. +func NormalizedMIMEType(input string) string { + switch input { + // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . + // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might + // need to happen within the ImageSource. + case "application/json": + return DockerV2Schema1SignedMediaType + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, + imgspecv1.MediaTypeImageManifest, + DockerV2Schema2MediaType, + DockerV2ListMediaType: + return input + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return DockerV2Schema1SignedMediaType + } +} + +// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type +func FromBlob(manblob []byte, mt string) (Manifest, error) { + switch NormalizedMIMEType(mt) { + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: + return Schema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return OCI1FromManifest(manblob) + case DockerV2Schema2MediaType: + return Schema2FromManifest(manblob) + case DockerV2ListMediaType: + return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") + default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) + } +} + +// LayerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() +// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. +func LayerInfosToStrings(infos []types.BlobInfo) []string { + layers := make([]string, len(infos)) + for i, info := range infos { + layers[i] = info.Digest.String() + } + return layers +} diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go new file mode 100644 index 00000000..0ffb35b7 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/oci.go @@ -0,0 +1,120 @@ +package manifest + +import ( + "encoding/json" + "time" + + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// OCI1 is a manifest.Manifest implementation for OCI images. 
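A sketch of these helpers in use, again assuming the vendored import path: the legacy `application/json` (and CDN-mangled) Content-Types are folded into the signed schema 1 media type, while a proper schema 2 type dispatches to `Schema2FromManifest`:

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	// Old registries (and CDNs fronting them) report unhelpful Content-Types;
	// NormalizedMIMEType folds them into the signed schema 1 type.
	fmt.Println(manifest.NormalizedMIMEType("application/json"))
	fmt.Println(manifest.NormalizedMIMEType("text/plain; charset=utf-8"))

	// A well-typed schema 2 blob dispatches to Schema2FromManifest.
	blob := []byte(`{"schemaVersion": 2, "config": {}, "layers": []}`)
	m, err := manifest.FromBlob(blob, manifest.DockerV2Schema2MediaType)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed as %T\n", m) // *manifest.Schema2
}
```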
+// The underlying data from imgspecv1.Manifest is also available. +type OCI1 struct { + imgspecv1.Manifest +} + +// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. +func OCI1FromManifest(manifest []byte) (*OCI1, error) { + oci1 := OCI1{} + if err := json.Unmarshal(manifest, &oci1); err != nil { + return nil, err + } + return &oci1, nil +} + +// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. +func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { + return &OCI1{ + imgspecv1.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + Config: config, + Layers: layers, + }, + } +} + +// OCI1Clone creates a copy of the supplied OCI1 manifest. +func OCI1Clone(src *OCI1) *OCI1 { + return &OCI1{ + Manifest: src.Manifest, + } +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *OCI1) ConfigInfo() types.BlobInfo { + return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *OCI1) LayerInfos() []types.BlobInfo { + blobs := []types.BlobInfo{} + for _, layer := range m.Layers { + blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType}) + } + return blobs +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.Layers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) + } + original := m.Layers + m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) + for i, info := range layerInfos { + m.Layers[i].MediaType = original[i].MediaType + m.Layers[i].Digest = info.Digest + m.Layers[i].Size = info.Size + m.Layers[i].Annotations = info.Annotations + m.Layers[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *OCI1) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + v1 := &imgspecv1.Image{} + if err := json.Unmarshal(config, v1); err != nil { + return nil, err + } + d1 := &Schema2V1Image{} + json.Unmarshal(config, d1) + created := time.Time{} + if v1.Created != nil { + created = *v1.Created + } + i := &types.ImageInspectInfo{ + Tag: "", + Created: created, + DockerVersion: d1.DockerVersion, + Labels: v1.Config.Labels, + Architecture: v1.Architecture, + Os: v1.OS, + Layers: LayerInfosToStrings(m.LayerInfos()), + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *OCI1) ImageID([]digest.Digest) (string, error) { + if err := m.Config.Digest.Validate(); err != nil { + return "", err + } + return m.Config.Digest.Hex(), nil +} diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go index b2c4bb63..52e99a43 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go @@ -106,11 +106,7 @@ func (d *ociArchiveImageDestination) Commit() error { src := d.tempDirRef.tempDirectory // path to save tarred up file dst := d.ref.resolvedFile - if err := tarDirectory(src, dst); err != nil { - return err - } - - return nil + return tarDirectory(src, dst) } // tar converts the directory at src and saves it to dst diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go index 9aa16a3d..aee5d8d5 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/oci/archive/oci_src.go @@ -19,13 +19,13 @@ type ociArchiveImageSource struct { // newImageSource returns an ImageSource for reading from an existing directory. // newImageSource untars the file and saves it in a temp directory -func newImageSource(ctx *types.SystemContext, ref ociArchiveReference, requestedManifestMIMETypes []string) (types.ImageSource, error) { +func newImageSource(ctx *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) { tempDirRef, err := createUntarTempDir(ref) if err != nil { return nil, errors.Wrap(err, "error creating temp directory") } - unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, requestedManifestMIMETypes) + unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx) if err != nil { if err := tempDirRef.deleteTempDir(); err != nil { return nil, errors.Wrapf(err, "error deleting temp directory", tempDirRef.tempDirectory) @@ -68,14 +68,12 @@ func (s *ociArchiveImageSource) Close() error { return s.unpackedSrc.Close() } -// GetManifest returns the image's manifest along with its MIME type -// (which may be empty when it can't be determined but the manifest is available). -func (s *ociArchiveImageSource) GetManifest() ([]byte, string, error) { - return s.unpackedSrc.GetManifest() -} - -func (s *ociArchiveImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return s.unpackedSrc.GetTargetManifest(digest) +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. 
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + return s.unpackedSrc.GetManifest(instanceDigest) } // GetBlob returns a stream for the specified blob, and the blob's size. @@ -83,6 +81,15 @@ func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int return s.unpackedSrc.GetBlob(info) } -func (s *ociArchiveImageSource) GetSignatures(c context.Context) ([][]byte, error) { - return s.unpackedSrc.GetSignatures(c) +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return s.unpackedSrc.GetSignatures(ctx, instanceDigest) +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *ociArchiveImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil } diff --git a/vendor/github.com/containers/image/oci/archive/oci_transport.go b/vendor/github.com/containers/image/oci/archive/oci_transport.go index da68b031..c4a4fa71 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/oci/archive/oci_transport.go @@ -4,13 +4,13 @@ import ( "fmt" "io/ioutil" "os" - "path/filepath" - "regexp" "strings" "github.com/containers/image/directory/explicitfilepath" "github.com/containers/image/docker/reference" "github.com/containers/image/image" + "github.com/containers/image/internal/tmpdir" + "github.com/containers/image/oci/internal" ocilayout "github.com/containers/image/oci/layout" "github.com/containers/image/transports" "github.com/containers/image/types" @@ -48,51 +48,12 @@ func (t ociArchiveTransport) ParseReference(reference string) (types.ImageRefere // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error { - var file string - sep := strings.SplitN(scope, ":", 2) - file = sep[0] - - if len(sep) == 2 { - image := sep[1] - if !refRegexp.MatchString(image) { - return errors.Errorf("Invalid image %s", image) - } - } - - if !strings.HasPrefix(file, "/") { - return errors.Errorf("Invalid scope %s: must be an absolute path", scope) - } - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - // (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?) 
- if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - cleaned := filepath.Clean(file) - if cleaned != file { - return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - return nil + return internal.ValidateScope(scope) } -// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys -const ( - separator = `(?:[-._:@+]|--)` - alphanum = `(?:[A-Za-z0-9]+)` - component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` -) - -var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) - // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. func ParseReference(reference string) (types.ImageReference, error) { - var file, image string - sep := strings.SplitN(reference, ":", 2) - file = sep[0] - - if len(sep) == 2 { - image = sep[1] - } + file, image := internal.SplitPathAndImage(reference) return NewReference(file, image) } @@ -102,14 +63,15 @@ func NewReference(file, image string) (types.ImageReference, error) { if err != nil { return nil, err } - // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces - // from being ambiguous with values of PolicyConfigurationIdentity. - if strings.Contains(resolved, ":") { - return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", file, image, resolved) + + if err := internal.ValidateOCIPath(file); err != nil { + return nil, err } - if len(image) > 0 && !refRegexp.MatchString(image) { - return nil, errors.Errorf("Invalid image %s", image) + + if err := internal.ValidateImageName(image); err != nil { + return nil, err } + return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil } @@ -154,21 +116,23 @@ func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string { return res } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. -func (ref ociArchiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - src, err := newImageSource(ctx, ref, nil) +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref ociArchiveReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, ref) if err != nil { return nil, err } - return image.FromSource(src) + return image.FromSource(ctx, src) } -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. +// NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. 
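The NOTE above describes the verification-friendly flow. A hedged sketch of what a caller might do; `verifySignatures` is a hypothetical placeholder for a policy check, and the exact `FromUnparsedImage` signature should be checked against this vendored revision:

```go
package example

import (
	"github.com/containers/image/image"
	"github.com/containers/image/types"
)

// verifySignatures is a hypothetical placeholder for a signature policy check.
func verifySignatures(unparsed *image.UnparsedImage) error { return nil }

// openVerified sketches the flow recommended in the NOTE above.
func openVerified(ctx *types.SystemContext, ref types.ImageReference) (types.Image, error) {
	src, err := ref.NewImageSource(ctx)
	if err != nil {
		return nil, err
	}
	// A nil instance digest selects the primary manifest (or manifest list).
	unparsed := image.UnparsedInstance(src, nil)
	if err := verifySignatures(unparsed); err != nil {
		src.Close()
		return nil, err
	}
	// The caller must keep src open while using the returned image, and
	// Close() it when done; signature hedged against this vendored revision.
	return image.FromUnparsedImage(ctx, unparsed)
}
```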
-func (ref ociArchiveReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return newImageSource(ctx, ref, requestedManifestMIMETypes) +func (ref ociArchiveReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, ref) } // NewImageDestination returns a types.ImageDestination for this reference. @@ -195,7 +159,7 @@ func (t *tempDirOCIRef) deleteTempDir() error { // createOCIRef creates the oci reference of the image func createOCIRef(image string) (tempDirOCIRef, error) { - dir, err := ioutil.TempDir("/var/tmp", "oci") + dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci") if err != nil { return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory") } diff --git a/vendor/github.com/containers/image/oci/internal/oci_util.go b/vendor/github.com/containers/image/oci/internal/oci_util.go new file mode 100644 index 00000000..c2012e50 --- /dev/null +++ b/vendor/github.com/containers/image/oci/internal/oci_util.go @@ -0,0 +1,126 @@ +package internal + +import ( + "github.com/pkg/errors" + "path/filepath" + "regexp" + "runtime" + "strings" +) + +// annotation specs from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys +const ( + separator = `(?:[-._:@+]|--)` + alphanum = `(?:[A-Za-z0-9]+)` + component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` +) + +var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) +var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`) + +// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs. +// In any other case an error is returned. +func ValidateImageName(image string) error { + if len(image) == 0 { + return nil + } + + var err error + if !refRegexp.MatchString(image) { + err = errors.Errorf("Invalid image %s", image) + } + return err } + +// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image. +// Neither path nor image parts are validated at this stage. +func SplitPathAndImage(reference string) (string, string) { + if runtime.GOOS == "windows" { + return splitPathAndImageWindows(reference) + } + return splitPathAndImageNonWindows(reference) +} + +func splitPathAndImageWindows(reference string) (string, string) { + groups := windowsRefRegexp.FindStringSubmatch(reference) + // nil group means no match + if groups == nil { + return reference, "" + } + + // we expect three elements: the full match, the capture group for the path, and + // the capture group for the image + if len(groups) != 3 { + return reference, "" + } + return groups[1], groups[2] } + +func splitPathAndImageNonWindows(reference string) (string, string) { + sep := strings.SplitN(reference, ":", 2) + path := sep[0] + + var image string + if len(sep) == 2 { + image = sep[1] + } + return path, image +} + +// ValidateOCIPath takes the OCI path and validates it.
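The non-Windows split is a plain `SplitN` on the first colon; a self-contained mirror of `splitPathAndImageNonWindows` for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// splitNonWindows mirrors splitPathAndImageNonWindows above: everything after
// the first colon names the image; no colon means no image part.
func splitNonWindows(reference string) (path, image string) {
	parts := strings.SplitN(reference, ":", 2)
	path = parts[0]
	if len(parts) == 2 {
		image = parts[1]
	}
	return path, image
}

func main() {
	fmt.Println(splitNonWindows("/var/lib/layout:latest")) // /var/lib/layout latest
	fmt.Println(splitNonWindows("/var/lib/layout"))        // /var/lib/layout
}
```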
+func ValidateOCIPath(path string) error { + if runtime.GOOS == "windows" { + // On Windows we must allow for a ':' as part of the path + if strings.Count(path, ":") > 1 { + return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path) + } + } else { + if strings.Contains(path, ":") { + return errors.Errorf("Invalid OCI reference: path %s contains a colon", path) + } + } + return nil +} + +// ValidateScope validates a policy configuration scope for an OCI transport. +func ValidateScope(scope string) error { + var err error + if runtime.GOOS == "windows" { + err = validateScopeWindows(scope) + } else { + err = validateScopeNonWindows(scope) + } + if err != nil { + return err + } + + cleaned := filepath.Clean(scope) + if cleaned != scope { + return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + return nil +} + +func validateScopeWindows(scope string) error { + matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) + if !matched { + return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) + } + + return nil +} + +func validateScopeNonWindows(scope string) error { + if !strings.HasPrefix(scope, "/") { + return errors.Errorf("Invalid scope %s: must be an absolute path", scope) + } + + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + + return nil +} diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go index c4801e34..e95f6516 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go @@ -18,21 +18,47 @@ import ( ) type ociImageDestination struct { - ref ociReference - index imgspecv1.Index + ref ociReference + index imgspecv1.Index + sharedBlobDir string } // newImageDestination returns an ImageDestination for writing to an existing directory. -func newImageDestination(ref ociReference) (types.ImageDestination, error) { +func newImageDestination(ctx *types.SystemContext, ref ociReference) (types.ImageDestination, error) { if ref.image == "" { return nil, errors.Errorf("cannot save image with empty image.ref.name") } - index := imgspecv1.Index{ - Versioned: imgspec.Versioned{ - SchemaVersion: 2, - }, + + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + } } - return &ociImageDestination{ref: ref, index: index}, nil + + d := &ociImageDestination{ref: ref, index: *index} + if ctx != nil { + d.sharedBlobDir = ctx.OCISharedBlobDirPath + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + return nil, err + } + return d, nil } // Reference returns the reference used to set up this destination. 
Note that this should directly correspond to user's intent, @@ -66,7 +92,7 @@ func (d *ociImageDestination) ShouldCompressLayers() bool { // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually // uploaded to the image destination, true otherwise. func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { - return false + return true } // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. @@ -81,16 +107,16 @@ func (d *ociImageDestination) MustMatchRuntimeOS() bool { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - if err := ensureDirectoryExists(d.ref.dir); err != nil { - return types.BlobInfo{}, err - } blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") if err != nil { return types.BlobInfo{}, err } succeeded := false + explicitClosed := false defer func() { - blobFile.Close() + if !explicitClosed { + blobFile.Close() + } if !succeeded { os.Remove(blobFile.Name()) } @@ -110,17 +136,28 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err } - if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err + + // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. + // On Windows, the “permissions of newly created files” argument to syscall.Open is + // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, + // always fails on Windows. 
+ if runtime.GOOS != "windows" { + if err := blobFile.Chmod(0644); err != nil { + return types.BlobInfo{}, err + } } - blobPath, err := d.ref.blobPath(computedDigest) + blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir) if err != nil { return types.BlobInfo{}, err } if err := ensureParentDirectoryExists(blobPath); err != nil { return types.BlobInfo{}, err } + + // need to explicitly close the file, since a rename won't otherwise work on Windows + blobFile.Close() + explicitClosed = true if err := os.Rename(blobFile.Name(), blobPath); err != nil { return types.BlobInfo{}, err } @@ -136,7 +173,7 @@ func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) if info.Digest == "" { return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`) } - blobPath, err := d.ref.blobPath(info.Digest) + blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) if err != nil { return false, -1, err } @@ -169,7 +206,7 @@ func (d *ociImageDestination) PutManifest(m []byte) error { desc.MediaType = imgspecv1.MediaTypeImageManifest desc.Size = int64(len(m)) - blobPath, err := d.ref.blobPath(digest) + blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) if err != nil { return err } @@ -191,23 +228,20 @@ func (d *ociImageDestination) PutManifest(m []byte) error { Architecture: runtime.GOARCH, OS: runtime.GOOS, } - d.index.Manifests = append(d.index.Manifests, desc) + d.addManifest(&desc) return nil } -func ensureDirectoryExists(path string) error { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - if err := os.MkdirAll(path, 0755); err != nil { - return err +func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { + for i, manifest := range d.index.Manifests { + if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] { + // TODO Should there first be a cleanup based on the descriptor we are going to replace? + d.index.Manifests[i] = *desc + return } } - return nil -} - -// ensureParentDirectoryExists ensures the parent of the supplied path exists. -func ensureParentDirectoryExists(path string) error { - return ensureDirectoryExists(filepath.Dir(path)) + d.index.Manifests = append(d.index.Manifests, *desc) } func (d *ociImageDestination) PutSignatures(signatures [][]byte) error { @@ -231,3 +265,30 @@ func (d *ociImageDestination) Commit() error { } return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644) } + +func ensureDirectoryExists(path string) error { + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } + return nil +} + +// ensureParentDirectoryExists ensures the parent of the supplied path exists. +func ensureParentDirectoryExists(path string) error { + return ensureDirectoryExists(filepath.Dir(path)) +} + +// indexExists checks whether the index location specified in the OCI reference exists.
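`addManifest` gives the layout index tag-like semantics: a second image written under the same `org.opencontainers.image.ref.name` annotation replaces the existing entry instead of appending a duplicate. A standalone sketch of the same replace-or-append loop (descriptor values are illustrative):

```go
package main

import (
	"fmt"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

const refName = "org.opencontainers.image.ref.name"

// upsert mirrors ociImageDestination.addManifest: replace the entry whose
// ref.name annotation matches the new descriptor, otherwise append it.
func upsert(manifests []imgspecv1.Descriptor, desc imgspecv1.Descriptor) []imgspecv1.Descriptor {
	for i, m := range manifests {
		if m.Annotations[refName] == desc.Annotations[refName] {
			manifests[i] = desc
			return manifests
		}
	}
	return append(manifests, desc)
}

func main() {
	first := imgspecv1.Descriptor{Annotations: map[string]string{refName: "v1.0"}}
	second := imgspecv1.Descriptor{Annotations: map[string]string{refName: "v1.0"}, Size: 42}
	index := upsert(nil, first)
	index = upsert(index, second)
	fmt.Println(len(index), index[0].Size) // 1 42: replaced, not appended
}
```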
+// The implementation is opinionated, since in case of unexpected errors true is returned (the error will surface when the index is actually read) +func indexExists(ref ociReference) bool { + _, err := os.Stat(ref.indexPath()) + if err == nil { + return true + } + if os.IsNotExist(err) { + return false + } + return true +} diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go index 99b9f208..1109f65c 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/oci/layout/oci_src.go @@ -4,25 +4,49 @@ import ( "context" "io" "io/ioutil" + "net/http" "os" + "strconv" + "github.com/containers/image/pkg/tlsclientconfig" "github.com/containers/image/types" + "github.com/docker/go-connections/tlsconfig" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) type ociImageSource struct { - ref ociReference - descriptor imgspecv1.Descriptor + ref ociReference + descriptor imgspecv1.Descriptor + client *http.Client + sharedBlobDir string } // newImageSource returns an ImageSource for reading from an existing directory. -func newImageSource(ref ociReference) (types.ImageSource, error) { +func newImageSource(ctx *types.SystemContext, ref ociReference) (types.ImageSource, error) { + tr := tlsclientconfig.NewTransport() + tr.TLSClientConfig = tlsconfig.ServerDefault() + + if ctx != nil && ctx.OCICertPath != "" { + if err := tlsclientconfig.SetupCertificates(ctx.OCICertPath, tr.TLSClientConfig); err != nil { + return nil, err + } + tr.TLSClientConfig.InsecureSkipVerify = ctx.OCIInsecureSkipTLSVerify + } + + client := &http.Client{} + client.Transport = tr descriptor, err := ref.getManifestDescriptor() if err != nil { return nil, err } - return &ociImageSource{ref: ref, descriptor: descriptor}, nil + d := &ociImageSource{ref: ref, descriptor: descriptor, client: client} + if ctx != nil { + // TODO(jonboulle): check dir existence? + d.sharedBlobDir = ctx.OCISharedBlobDirPath + } + return d, nil } // Reference returns the reference used to set up this source. @@ -37,8 +61,26 @@ func (s *ociImageSource) Close() error { // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *ociImageSource) GetManifest() ([]byte, string, error) { - manifestPath, err := s.ref.blobPath(digest.Digest(s.descriptor.Digest)) +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + var dig digest.Digest + var mimeType string + if instanceDigest == nil { + dig = digest.Digest(s.descriptor.Digest) + mimeType = s.descriptor.MediaType + } else { + dig = *instanceDigest + // XXX: instanceDigest means that we don't immediately have the context of what + // mediaType the manifest has. In OCI this means that we don't know + // what reference it came from, so we just *assume* that it's + // MediaTypeImageManifest. + // FIXME: We should actually be able to look up the manifest in the index, + // and see the MIME type there.
+ mimeType = imgspecv1.MediaTypeImageManifest + } + + manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) if err != nil { return nil, "", err } @@ -47,30 +89,16 @@ func (s *ociImageSource) GetManifest() ([]byte, string, error) { return nil, "", err } - return m, s.descriptor.MediaType, nil -} - -func (s *ociImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - manifestPath, err := s.ref.blobPath(digest) - if err != nil { - return nil, "", err - } - - m, err := ioutil.ReadFile(manifestPath) - if err != nil { - return nil, "", err - } - - // XXX: GetTargetManifest means that we don't have the context of what - // mediaType the manifest has. In OCI this means that we don't know - // what reference it came from, so we just *assume* that its - // MediaTypeImageManifest. - return m, imgspecv1.MediaTypeImageManifest, nil + return m, mimeType, nil } // GetBlob returns a stream for the specified blob, and the blob's size. func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - path, err := s.ref.blobPath(info.Digest) + if len(info.URLs) != 0 { + return s.getExternalBlob(info.URLs) + } + + path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) if err != nil { return nil, 0, err } @@ -86,6 +114,44 @@ func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err return r, fi.Size(), nil } -func (s *ociImageSource) GetSignatures(context.Context) ([][]byte, error) { +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { return [][]byte{}, nil } + +func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, error) { + errWrap := errors.New("failed fetching external blob from all urls") + for _, url := range urls { + resp, err := s.client.Get(url) + if err != nil { + errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) + continue + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url) + continue + } + + return resp.Body, getBlobSize(resp), nil + } + + return nil, 0, errWrap +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
+func (s *ociImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport.go b/vendor/github.com/containers/image/oci/layout/oci_transport.go index 1406dd18..c181c4c7 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/oci/layout/oci_transport.go @@ -5,12 +5,12 @@ import ( "fmt" "os" "path/filepath" - "regexp" "strings" "github.com/containers/image/directory/explicitfilepath" "github.com/containers/image/docker/reference" "github.com/containers/image/image" + "github.com/containers/image/oci/internal" "github.com/containers/image/transports" "github.com/containers/image/types" "github.com/opencontainers/go-digest" @@ -36,45 +36,12 @@ func (t ociTransport) ParseReference(reference string) (types.ImageReference, er return ParseReference(reference) } -// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys -const ( - separator = `(?:[-._:@+]|--)` - alphanum = `(?:[A-Za-z0-9]+)` - component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` -) - -var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) - // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { - var dir string - sep := strings.SplitN(scope, ":", 2) - dir = sep[0] - - if len(sep) == 2 { - image := sep[1] - if !refRegexp.MatchString(image) { - return errors.Errorf("Invalid image %s", image) - } - } - - if !strings.HasPrefix(dir, "/") { - return errors.Errorf("Invalid scope %s: must be an absolute path", scope) - } - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - // (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?) - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - cleaned := filepath.Clean(dir) - if cleaned != dir { - return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - return nil + return internal.ValidateScope(scope) } // ociReference is an ImageReference for OCI directory paths. @@ -92,13 +59,7 @@ type ociReference struct { // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. 
 func ParseReference(reference string) (types.ImageReference, error) {
-	var dir, image string
-	sep := strings.SplitN(reference, ":", 2)
-	dir = sep[0]
-
-	if len(sep) == 2 {
-		image = sep[1]
-	}
+	dir, image := internal.SplitPathAndImage(reference)
 	return NewReference(dir, image)
 }
@@ -111,14 +72,15 @@
 	if err != nil {
 		return nil, err
 	}
-	// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
-	// from being ambiguous with values of PolicyConfigurationIdentity.
-	if strings.Contains(resolved, ":") {
-		return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, image, resolved)
+
+	if err := internal.ValidateOCIPath(dir); err != nil {
+		return nil, err
 	}
-	if len(image) > 0 && !refRegexp.MatchString(image) {
-		return nil, errors.Errorf("Invalid image %s", image)
+
+	if err = internal.ValidateImageName(image); err != nil {
+		return nil, err
 	}
+
 	return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
 }
@@ -177,28 +139,40 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string {
 	return res
 }
 
-// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned Image.
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
 // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
 // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
-	src, err := newImageSource(ref)
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
+	src, err := newImageSource(ctx, ref)
 	if err != nil {
 		return nil, err
 	}
-	return image.FromSource(src)
+	return image.FromSource(ctx, src)
+}
+
+// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening the index, nil is returned together
+// with the error.
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) {
+	indexJSON, err := os.Open(ref.indexPath())
+	if err != nil {
+		return nil, err
+	}
+	defer indexJSON.Close()
+
+	index := &imgspecv1.Index{}
+	if err := json.NewDecoder(indexJSON).Decode(index); err != nil {
+		return nil, err
+	}
+	return index, nil
 }
 
 func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
-	indexJSON, err := os.Open(ref.indexPath())
+	index, err := ref.getIndex()
 	if err != nil {
 		return imgspecv1.Descriptor{}, err
 	}
-	defer indexJSON.Close()
-	index := imgspecv1.Index{}
-	if err := json.NewDecoder(indexJSON).Decode(&index); err != nil {
-		return imgspecv1.Descriptor{}, err
-	}
 
 	var d *imgspecv1.Descriptor
 	if ref.image == "" {
@@ -241,18 +215,16 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor,
 	return ociRef.getManifestDescriptor()
 }
 
-// NewImageSource returns a types.ImageSource for this reference,
-// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
-// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource. -func (ref ociReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return newImageSource(ref) +func (ref ociReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, ref) } // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ref) + return newImageDestination(ctx, ref) } // DeleteImage deletes the named image from the registry, if supported. @@ -271,9 +243,13 @@ func (ref ociReference) indexPath() string { } // blobPath returns a path for a blob within a directory using OCI image-layout conventions. -func (ref ociReference) blobPath(digest digest.Digest) (string, error) { +func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { if err := digest.Validate(); err != nil { return "", errors.Wrapf(err, "unexpected digest reference %s", digest) } - return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil + blobDir := filepath.Join(ref.dir, "blobs") + if sharedBlobDir != "" { + blobDir = sharedBlobDir + } + return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil } diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go index 19ad2537..54655914 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -162,18 +162,15 @@ func (c *openshiftClient) convertDockerImageReference(ref string) (string, error type openshiftImageSource struct { client *openshiftClient // Values specific to this image - ctx *types.SystemContext - requestedManifestMIMETypes []string + ctx *types.SystemContext // State docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet imageStreamImageName string // Resolved image identifier, or "" if not known yet } -// newImageSource creates a new ImageSource for the specified reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. +// newImageSource creates a new ImageSource for the specified reference. // The caller must call .Close() on the returned ImageSource. 
-func newImageSource(ctx *types.SystemContext, ref openshiftReference, requestedManifestMIMETypes []string) (types.ImageSource, error) { +func newImageSource(ctx *types.SystemContext, ref openshiftReference) (types.ImageSource, error) { client, err := newOpenshiftClient(ref) if err != nil { return nil, err @@ -182,7 +179,6 @@ func newImageSource(ctx *types.SystemContext, ref openshiftReference, requestedM return &openshiftImageSource{ client: client, ctx: ctx, - requestedManifestMIMETypes: requestedManifestMIMETypes, }, nil } @@ -204,20 +200,15 @@ func (s *openshiftImageSource) Close() error { return nil } -func (s *openshiftImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - if err := s.ensureImageIsResolved(context.TODO()); err != nil { - return nil, "", err - } - return s.docker.GetTargetManifest(digest) -} - // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *openshiftImageSource) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { if err := s.ensureImageIsResolved(context.TODO()); err != nil { return nil, "", err } - return s.docker.GetManifest() + return s.docker.GetManifest(instanceDigest) } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). @@ -228,12 +219,21 @@ func (s *openshiftImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int6 return s.docker.GetBlob(info) } -func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, err +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + var imageName string + if instanceDigest == nil { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, err + } + imageName = s.imageStreamImageName + } else { + imageName = instanceDigest.String() } - - image, err := s.client.getImage(ctx, s.imageStreamImageName) + image, err := s.client.getImage(ctx, imageName) if err != nil { return nil, err } @@ -246,6 +246,11 @@ func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, err return sigs, nil } +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
+func (s *openshiftImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} + // ensureImageIsResolved sets up s.docker and s.imageStreamImageName func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { if s.docker != nil { @@ -286,7 +291,7 @@ func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error if err != nil { return err } - d, err := dockerRef.NewImageSource(s.ctx, s.requestedManifestMIMETypes) + d, err := dockerRef.NewImageSource(s.ctx) if err != nil { return err } diff --git a/vendor/github.com/containers/image/openshift/openshift_transport.go b/vendor/github.com/containers/image/openshift/openshift_transport.go index 108e1102..686d806f 100644 --- a/vendor/github.com/containers/image/openshift/openshift_transport.go +++ b/vendor/github.com/containers/image/openshift/openshift_transport.go @@ -125,24 +125,23 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string { return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference) } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - src, err := newImageSource(ctx, ref, nil) +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, ref) if err != nil { return nil, err } - return genericImage.FromSource(src) + return genericImage.FromSource(ctx, src) } -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. +// NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. -func (ref openshiftReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return newImageSource(ctx, ref, requestedManifestMIMETypes) +func (ref openshiftReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, ref) } // NewImageDestination returns a types.ImageDestination for this reference. 
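
The hunks above migrate both the oci layout and openshift transports from the removed `GetTargetManifest`/`requestedManifestMIMETypes` pair to a single `GetManifest(instanceDigest *digest.Digest)` entry point. A minimal sketch of how a caller drives the updated interface; the `oci:` reference string and the `alltransports.ParseImageName` helper are illustrative assumptions, not part of this change:

```
package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	// Hypothetical reference; any transport updated in this patch
	// (oci:, openshift:, ostree:) exposes the same interface.
	ref, err := alltransports.ParseImageName("oci:/tmp/layout:latest")
	if err != nil {
		panic(err)
	}
	// NewImageSource no longer takes requestedManifestMIMETypes.
	src, err := ref.NewImageSource(nil)
	if err != nil {
		panic(err)
	}
	defer src.Close()
	// A nil instanceDigest selects the primary manifest; a non-nil digest
	// selects one instance of a manifest list, replacing GetTargetManifest.
	manifestBlob, mimeType, err := src.GetManifest(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("got %d-byte manifest of type %s\n", len(manifestBlob), mimeType)
}
```

The same nil-versus-digest convention applies to `GetSignatures`, which is why both methods gained the `instanceDigest` parameter in lockstep.
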
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
index f03ef65a..704e1ece 100644
--- a/vendor/github.com/containers/image/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -4,6 +4,8 @@
 
 import (
 	"bytes"
+	"compress/gzip"
+	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -12,18 +14,27 @@ import (
 	"os/exec"
 	"path/filepath"
 	"strconv"
-	"strings"
 	"time"
 
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
-
 	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
 )
 
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+import "C"
+
 type blobToImport struct {
 	Size   int64
 	Digest digest.Digest
@@ -35,17 +46,24 @@ type descriptor struct {
 	Digest    digest.Digest `json:"digest"`
 }
 
+type fsLayersSchema1 struct {
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
 type manifestSchema struct {
-	ConfigDescriptor  descriptor   `json:"config"`
-	LayersDescriptors []descriptor `json:"layers"`
+	LayersDescriptors []descriptor      `json:"layers"`
+	FSLayers          []fsLayersSchema1 `json:"fsLayers"`
 }
 
 type ostreeImageDestination struct {
-	ref        ostreeReference
-	manifest   string
-	schema     manifestSchema
-	tmpDirPath string
-	blobs      map[string]*blobToImport
+	ref           ostreeReference
+	manifest      string
+	schema        manifestSchema
+	tmpDirPath    string
+	blobs         map[string]*blobToImport
+	digest        digest.Digest
+	signaturesLen int
+	repo          *C.struct_OstreeRepo
 }
 
 // newImageDestination returns an ImageDestination for writing to an existing ostree.
@@ -54,7 +72,7 @@ func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDes
 	if err := ensureDirectoryExists(tmpDirPath); err != nil {
 		return nil, err
 	}
-	return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}}, nil
+	return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
 }
 
 // Reference returns the reference used to set up this destination.  Note that this should directly correspond to user's intent,
@@ -65,6 +83,9 @@ func (d *ostreeImageDestination) Reference() types.ImageReference {
 
 // Close removes resources associated with an initialized ImageDestination, if any.
func (d *ostreeImageDestination) Close() error { + if d.repo != nil { + C.g_object_unref(C.gpointer(d.repo)) + } return os.RemoveAll(d.tmpDirPath) } @@ -153,7 +174,7 @@ func fixFiles(dir string, usermode bool) error { if err != nil { return err } - } else if usermode && (info.Mode().IsRegular() || (info.Mode()&os.ModeSymlink) != 0) { + } else if usermode && (info.Mode().IsRegular()) { if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { return err } @@ -173,6 +194,35 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin return err } +func generateTarSplitMetadata(output *bytes.Buffer, file string) error { + mfz := gzip.NewWriter(output) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + stream, err := os.OpenFile(file, os.O_RDONLY, 0) + if err != nil { + return err + } + defer stream.Close() + + gzReader, err := gzip.NewReader(stream) + if err != nil { + return err + } + defer gzReader.Close() + + its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) + if err != nil { + return err + } + + _, err = io.Copy(ioutil.Discard, its) + if err != nil { + return err + } + return nil +} + func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToImport) error { ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root") @@ -184,6 +234,11 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm os.RemoveAll(destinationPath) }() + var tarSplitOutput bytes.Buffer + if err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath); err != nil { + return err + } + if os.Getuid() == 0 { if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil { return err @@ -201,28 +256,35 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm return err } } + return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size), + fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))}) + +} + +func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error { + ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) + destinationPath := filepath.Dir(blob.BlobPath) + return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) } -func (d *ostreeImageDestination) importConfig(blob *blobToImport) error { - ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) - - return exec.Command("ostree", "commit", - "--repo", d.ref.repo, - fmt.Sprintf("--add-metadata-string=docker.size=%d", blob.Size), - "--branch", ostreeBranch, filepath.Dir(blob.BlobPath)).Run() -} - func (d *ostreeImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) - output, err := exec.Command("ostree", "show", "--repo", d.ref.repo, "--print-metadata-key=docker.size", branch).CombinedOutput() - if err != nil { - if bytes.Index(output, []byte("not found")) >= 0 || bytes.Index(output, []byte("No such")) >= 0 { - return false, -1, nil + + if d.repo == nil { + repo, err := openRepo(d.ref.repo) + if err != nil { + return false, 0, err } - return false, -1, err + d.repo = repo } - size, err := strconv.ParseInt(strings.Trim(string(output), "'\n"), 10, 64) + branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) + + found, data, err := readMetadata(d.repo, branch, "docker.size") + if err != nil || !found 
{ + return found, -1, err + } + + size, err := strconv.ParseInt(data, 10, 64) if err != nil { return false, -1, err } @@ -238,10 +300,10 @@ func (d *ostreeImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInf // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *ostreeImageDestination) PutManifest(manifest []byte) error { - d.manifest = string(manifest) +func (d *ostreeImageDestination) PutManifest(manifestBlob []byte) error { + d.manifest = string(manifestBlob) - if err := json.Unmarshal(manifest, &d.schema); err != nil { + if err := json.Unmarshal(manifestBlob, &d.schema); err != nil { return err } @@ -250,7 +312,13 @@ func (d *ostreeImageDestination) PutManifest(manifest []byte) error { return err } - return ioutil.WriteFile(manifestPath, manifest, 0644) + digest, err := manifest.Digest(manifestBlob) + if err != nil { + return err + } + d.digest = digest + + return ioutil.WriteFile(manifestPath, manifestBlob, 0644) } func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error { @@ -265,6 +333,7 @@ func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error { return err } } + d.signaturesLen = len(signatures) return nil } @@ -279,24 +348,37 @@ func (d *ostreeImageDestination) Commit() error { return err } - for _, layer := range d.schema.LayersDescriptors { - hash := layer.Digest.Hex() + checkLayer := func(hash string) error { blob := d.blobs[hash] // if the blob is not present in d.blobs then it is already stored in OSTree, // and we don't need to import it. 
 		if blob == nil {
-			continue
+			return nil
 		}
 		err := d.importBlob(repo, blob)
 		if err != nil {
 			return err
 		}
+
+		delete(d.blobs, hash)
+		return nil
+	}
+	for _, layer := range d.schema.LayersDescriptors {
+		hash := layer.Digest.Hex()
+		if err = checkLayer(hash); err != nil {
+			return err
+		}
+	}
+	for _, layer := range d.schema.FSLayers {
+		hash := layer.BlobSum.Hex()
+		if err = checkLayer(hash); err != nil {
+			return err
+		}
 	}
-	hash := d.schema.ConfigDescriptor.Digest.Hex()
-	blob := d.blobs[hash]
-	if blob != nil {
-		err := d.importConfig(blob)
+	// Import the other blobs that are not layers
+	for _, blob := range d.blobs {
+		err := d.importConfig(repo, blob)
 		if err != nil {
 			return err
 		}
@@ -304,7 +386,9 @@ func (d *ostreeImageDestination) Commit() error {
 	manifestPath := filepath.Join(d.tmpDirPath, "manifest")
 
-	metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest))}
+	metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
+		fmt.Sprintf("signatures=%d", d.signaturesLen),
+		fmt.Sprintf("docker.digest=%s", string(d.digest))}
 	err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata)
 
 	_, err = repo.CommitTransaction()
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
new file mode 100644
index 00000000..c65a07b7
--- /dev/null
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -0,0 +1,354 @@
+// +build !containers_image_ostree_stub
+
+package ostree
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strconv"
+	"strings"
+	"unsafe"
+
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/types"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/opencontainers/go-digest"
+	glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+import "C"
+
+type ostreeImageSource struct {
+	ref    ostreeReference
+	tmpDir string
+	repo   *C.struct_OstreeRepo
+}
+
+// newImageSource returns an ImageSource for reading from an existing ostree repository.
+func newImageSource(ctx *types.SystemContext, tmpDir string, ref ostreeReference) (types.ImageSource, error) {
+	return &ostreeImageSource{ref: ref, tmpDir: tmpDir}, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ostreeImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *ostreeImageSource) Close() error {
+	if s.repo != nil {
+		C.g_object_unref(C.gpointer(s.repo))
+	}
+	return nil
+}
+
+func (s *ostreeImageSource) getLayerSize(blob string) (int64, error) {
+	b := fmt.Sprintf("ociimage/%s", blob)
+	found, data, err := readMetadata(s.repo, b, "docker.size")
+	if err != nil || !found {
+		return 0, err
+	}
+	return strconv.ParseInt(data, 10, 64)
+}
+
+func (s *ostreeImageSource) getLenSignatures() (int64, error) {
+	b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+	found, data, err := readMetadata(s.repo, b, "signatures")
+	if err != nil {
+		return -1, err
+	}
+	if !found {
+		// if 'signatures' is not present, just return 0 signatures.
+ return 0, nil + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) { + b := fmt.Sprintf("ociimage/%s", blob) + found, out, err := readMetadata(s.repo, b, "tarsplit.output") + if err != nil || !found { + return nil, err + } + return base64.StdEncoding.DecodeString(out) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +func (s *ostreeImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`) + } + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, "", err + } + s.repo = repo + } + + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, out, err := readMetadata(s.repo, b, "docker.manifest") + if err != nil { + return nil, "", err + } + if !found { + return nil, "", errors.New("manifest not found") + } + m := []byte(out) + return m, manifest.GuessMIMEType(m), nil +} + +func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + return nil, "", errors.New("manifest lists are not supported by this transport") +} + +func openRepo(path string) (*C.struct_OstreeRepo, error) { + var cerr *C.GError + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + repo := C.ostree_repo_new(pathc) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) + if !r { + C.g_object_unref(C.gpointer(repo)) + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + return repo, nil +} + +type ostreePathFileGetter struct { + repo *C.struct_OstreeRepo + parentRoot *C.GFile +} + +type ostreeReader struct { + stream *C.GFileInputStream +} + +func (o ostreeReader) Close() error { + C.g_object_unref(C.gpointer(o.stream)) + return nil +} +func (o ostreeReader) Read(p []byte) (int, error) { + var cerr *C.GError + instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type()) + stream := (*C.GInputStream)(unsafe.Pointer(instanceCast)) + + b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr) + if b == nil { + return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_bytes_unref(b) + + count := int(C.g_bytes_get_size(b)) + if count == 0 { + return 0, io.EOF + } + data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count] + copy(p, data) + return count, nil +} + +func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) { + var cerr *C.GError + var ref *C.char + defer C.free(unsafe.Pointer(ref)) + + cCommit := C.CString(commit) + defer C.free(unsafe.Pointer(cCommit)) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) { + return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + if ref == nil { + return false, "", nil + } + + var variant *C.GVariant + if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) { + return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_variant_unref(variant) + if variant != nil { + cKey := C.CString(key) + defer 
C.free(unsafe.Pointer(cKey)) + + metadata := C.g_variant_get_child_value(variant, 0) + defer C.g_variant_unref(metadata) + + data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil) + if data != nil { + defer C.g_variant_unref(data) + ptr := (*C.char)(C.g_variant_get_string(data, nil)) + val := C.GoString(ptr) + return true, val, nil + } + } + return false, "", nil +} + +func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) { + var cerr *C.GError + var parentRoot *C.GFile + cCommit := C.CString(commit) + defer C.free(unsafe.Pointer(cCommit)) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) { + return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + C.g_object_ref(C.gpointer(repo)) + + return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil +} + +func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) { + var file *C.GFile + if strings.HasPrefix(filename, "./") { + filename = filename[2:] + } + cfilename := C.CString(filename) + defer C.free(unsafe.Pointer(cfilename)) + + file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename)) + + var cerr *C.GError + stream := C.g_file_read(file, nil, &cerr) + if stream == nil { + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + return &ostreeReader{stream: stream}, nil +} + +func (o ostreePathFileGetter) Close() { + C.g_object_unref(C.gpointer(o.repo)) + C.g_object_unref(C.gpointer(o.parentRoot)) +} + +func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) { + getter, err := newOSTreePathFileGetter(s.repo, commit) + if err != nil { + return nil, err + } + defer getter.Close() + + return getter.Get(path) +} + +// GetBlob returns a stream for the specified blob, and the blob's size. +func (s *ostreeImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { + blob := info.Digest.Hex() + branch := fmt.Sprintf("ociimage/%s", blob) + + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, 0, err + } + s.repo = repo + } + + layerSize, err := s.getLayerSize(blob) + if err != nil { + return nil, 0, err + } + + tarsplit, err := s.getTarSplitData(blob) + if err != nil { + return nil, 0, err + } + + // if tarsplit is nil we are looking at the manifest. 
Return directly the file in /content + if tarsplit == nil { + file, err := s.readSingleFile(branch, "/content") + if err != nil { + return nil, 0, err + } + return file, layerSize, nil + } + + mf := bytes.NewReader(tarsplit) + mfz, err := gzip.NewReader(mf) + if err != nil { + return nil, 0, err + } + defer mfz.Close() + metaUnpacker := storage.NewJSONUnpacker(mfz) + + getter, err := newOSTreePathFileGetter(s.repo, branch) + if err != nil { + return nil, 0, err + } + + ots := asm.NewOutputTarStream(getter, metaUnpacker) + + pipeReader, pipeWriter := io.Pipe() + go func() { + io.Copy(pipeWriter, ots) + pipeWriter.Close() + }() + + rc := ioutils.NewReadCloserWrapper(pipeReader, func() error { + getter.Close() + return ots.Close() + }) + return rc, layerSize, nil +} + +func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.New("manifest lists are not supported by this transport") + } + lenSignatures, err := s.getLenSignatures() + if err != nil { + return nil, err + } + branch := fmt.Sprintf("ociimage/%s", s.ref.branchName) + + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, err + } + s.repo = repo + } + + signatures := [][]byte{} + for i := int64(1); i <= lenSignatures; i++ { + sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i)) + if err != nil { + return nil, err + } + defer sigReader.Close() + + sig, err := ioutil.ReadAll(sigReader) + if err != nil { + return nil, err + } + signatures = append(signatures, sig) + } + return signatures, nil +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *ostreeImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/ostree/ostree_transport.go b/vendor/github.com/containers/image/ostree/ostree_transport.go index defdc63c..cc85a43f 100644 --- a/vendor/github.com/containers/image/ostree/ostree_transport.go +++ b/vendor/github.com/containers/image/ostree/ostree_transport.go @@ -10,12 +10,12 @@ import ( "regexp" "strings" - "github.com/pkg/errors" - "github.com/containers/image/directory/explicitfilepath" "github.com/containers/image/docker/reference" + "github.com/containers/image/image" "github.com/containers/image/transports" "github.com/containers/image/types" + "github.com/pkg/errors" ) const defaultOSTreeRepo = "/ostree/repo" @@ -66,6 +66,11 @@ type ostreeReference struct { repo string } +type ostreeImageCloser struct { + types.ImageCloser + size int64 +} + func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) { var repo = "" var image = "" @@ -84,24 +89,15 @@ func NewReference(image string, repo string) (types.ImageReference, error) { // image is not _really_ in a containers/image/docker/reference format; // as far as the libOSTree ociimage/* namespace is concerned, it is more or // less an arbitrary string with an implied tag. - // We use the reference.* parsers basically for the default tag name in - // reference.TagNameOnly, and incidentally for some character set and length - // restrictions. 
-	var ostreeImage reference.Named
-	s := strings.SplitN(image, ":", 2)
-
-	named, err := reference.WithName(s[0])
+	// Parse the image using reference.ParseNormalizedNamed so that we can
+	// check whether the image has a tag specified, and add ":latest" if needed
+	ostreeImage, err := reference.ParseNormalizedNamed(image)
 	if err != nil {
 		return nil, err
 	}
-	if len(s) == 1 {
-		ostreeImage = reference.TagNameOnly(named)
-	} else {
-		ostreeImage, err = reference.WithTag(named, s[1])
-		if err != nil {
-			return nil, err
-		}
+	if reference.IsNameOnly(ostreeImage) {
+		image = image + ":latest"
 	}
 
 	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
@@ -119,12 +115,12 @@ func NewReference(image string, repo string) (types.ImageReference, error) {
 	// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
 	// from being ambiguous with values of PolicyConfigurationIdentity.
 	if strings.Contains(resolved, ":") {
-		return nil, errors.Errorf("Invalid OSTreeCI reference %s@%s: path %s contains a colon", image, repo, resolved)
+		return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
 	}
 
 	return ostreeReference{
-		image:      ostreeImage.String(),
-		branchName: encodeOStreeRef(ostreeImage.String()),
+		image:      image,
+		branchName: encodeOStreeRef(image),
 		repo:       resolved,
 	}, nil
 }
@@ -177,20 +173,38 @@ func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
 	return res
 }
 
-// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned Image.
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
-// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
-	return nil, errors.New("Reading ostree: images is currently not supported")
+func (s *ostreeImageCloser) Size() (int64, error) {
+	return s.size, nil
 }
 
-// NewImageSource returns a types.ImageSource for this reference,
-// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
-// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
+	var tmpDir string
+	if ctx == nil || ctx.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = ctx.OSTreeTmpDirPath
+	}
+	src, err := newImageSource(ctx, tmpDir, ref)
+	if err != nil {
+		return nil, err
+	}
+	return image.FromSource(ctx, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
 // The caller must call .Close() on the returned ImageSource.
-func (ref ostreeReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return nil, errors.New("Reading ostree: images is currently not supported") +func (ref ostreeReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { + var tmpDir string + if ctx == nil || ctx.OSTreeTmpDirPath == "" { + tmpDir = os.TempDir() + } else { + tmpDir = ctx.OSTreeTmpDirPath + } + return newImageSource(ctx, tmpDir, ref) } // NewImageDestination returns a types.ImageDestination for this reference. diff --git a/vendor/github.com/containers/image/pkg/docker/config/config.go b/vendor/github.com/containers/image/pkg/docker/config/config.go new file mode 100644 index 00000000..fd0ae7d8 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/docker/config/config.go @@ -0,0 +1,295 @@ +package config + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containers/image/types" + helperclient "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" + "github.com/docker/docker/pkg/homedir" + "github.com/pkg/errors" +) + +type dockerAuthConfig struct { + Auth string `json:"auth,omitempty"` +} + +type dockerConfigFile struct { + AuthConfigs map[string]dockerAuthConfig `json:"auths"` + CredHelpers map[string]string `json:"credHelpers,omitempty"` +} + +const ( + defaultPath = "/run/user" + authCfg = "containers" + authCfgFileName = "auth.json" + dockerCfg = ".docker" + dockerCfgFileName = "config.json" + dockerLegacyCfg = ".dockercfg" +) + +var ( + // ErrNotLoggedIn is returned for users not logged into a registry + // that they are trying to logout of + ErrNotLoggedIn = errors.New("not logged in") +) + +// SetAuthentication stores the username and password in the auth.json file +func SetAuthentication(ctx *types.SystemContext, registry, username, password string) error { + return modifyJSON(ctx, func(auths *dockerConfigFile) (bool, error) { + if ch, exists := auths.CredHelpers[registry]; exists { + return false, setAuthToCredHelper(ch, registry, username, password) + } + + creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) + newCreds := dockerAuthConfig{Auth: creds} + auths.AuthConfigs[registry] = newCreds + return true, nil + }) +} + +// GetAuthentication returns the registry credentials stored in +// either auth.json file or .docker/config.json +// If an entry is not found empty strings are returned for the username and password +func GetAuthentication(ctx *types.SystemContext, registry string) (string, string, error) { + if ctx != nil && ctx.DockerAuthConfig != nil { + return ctx.DockerAuthConfig.Username, ctx.DockerAuthConfig.Password, nil + } + + dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyCfg) + paths := [3]string{getPathToAuth(ctx), filepath.Join(homedir.Get(), dockerCfg, dockerCfgFileName), dockerLegacyPath} + + for _, path := range paths { + legacyFormat := path == dockerLegacyPath + username, password, err := findAuthentication(registry, path, legacyFormat) + if err != nil { + return "", "", err + } + if username != "" && password != "" { + return username, password, nil + } + } + return "", "", nil +} + +// GetUserLoggedIn returns the username logged in to registry from either +// auth.json or XDG_RUNTIME_DIR +// Used to tell the user if someone is logged in to the registry when logging in +func GetUserLoggedIn(ctx *types.SystemContext, 
registry string) string {
+	path := getPathToAuth(ctx)
+	username, _, _ := findAuthentication(registry, path, false)
+	if username != "" {
+		return username
+	}
+	return ""
+}
+
+// RemoveAuthentication deletes the credentials stored in auth.json
+func RemoveAuthentication(ctx *types.SystemContext, registry string) error {
+	return modifyJSON(ctx, func(auths *dockerConfigFile) (bool, error) {
+		// First try cred helpers.
+		if ch, exists := auths.CredHelpers[registry]; exists {
+			return false, deleteAuthFromCredHelper(ch, registry)
+		}
+
+		if _, ok := auths.AuthConfigs[registry]; ok {
+			delete(auths.AuthConfigs, registry)
+		} else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
+			delete(auths.AuthConfigs, normalizeRegistry(registry))
+		} else {
+			return false, ErrNotLoggedIn
+		}
+		return true, nil
+	})
+}
+
+// RemoveAllAuthentication deletes all the credentials stored in auth.json
+func RemoveAllAuthentication(ctx *types.SystemContext) error {
+	return modifyJSON(ctx, func(auths *dockerConfigFile) (bool, error) {
+		auths.CredHelpers = make(map[string]string)
+		auths.AuthConfigs = make(map[string]dockerAuthConfig)
+		return true, nil
+	})
+}
+
+// getPathToAuth gets the path of the auth.json file
+// The path can be overridden by the user if the overwrite-path flag is set
+// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers
+// Otherwise, the auth.json file is stored in /run/user/UID/containers
+func getPathToAuth(ctx *types.SystemContext) string {
+	if ctx != nil {
+		if ctx.AuthFilePath != "" {
+			return ctx.AuthFilePath
+		}
+		if ctx.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(ctx.RootForImplicitAbsolutePaths, defaultPath, strconv.Itoa(os.Getuid()), authCfg, authCfgFileName)
+		}
+	}
+	runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+	if runtimeDir == "" {
+		runtimeDir = filepath.Join(defaultPath, strconv.Itoa(os.Getuid()))
+	}
+	return filepath.Join(runtimeDir, authCfg, authCfgFileName)
+}
+
+// readJSONFile unmarshals the authentications stored in the auth.json file and returns it
+// or returns an empty dockerConfigFile data structure if auth.json does not exist
+// if the file exists and is empty, readJSONFile returns an error
+func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
+	var auths dockerConfigFile
+
+	raw, err := ioutil.ReadFile(path)
+	if os.IsNotExist(err) {
+		auths.AuthConfigs = map[string]dockerAuthConfig{}
+		return auths, nil
+	}
+
+	if legacyFormat {
+		if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
+			return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+		}
+		return auths, nil
+	}
+
+	if err = json.Unmarshal(raw, &auths); err != nil {
+		return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+	}
+
+	return auths, nil
+}
+
+// modifyJSON writes to auth.json if the dockerConfigFile has been updated
+func modifyJSON(ctx *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error {
+	path := getPathToAuth(ctx)
+	dir := filepath.Dir(path)
+	if _, err := os.Stat(dir); os.IsNotExist(err) {
+		if err = os.Mkdir(dir, 0700); err != nil {
+			return errors.Wrapf(err, "error creating directory %q", dir)
+		}
+	}
+
+	auths, err := readJSONFile(path, false)
+	if err != nil {
+		return errors.Wrapf(err, "error reading JSON file %q", path)
+	}
+
+	updated, err := editor(&auths)
+	if err != nil {
+		return errors.Wrapf(err, "error updating %q", path)
+	}
+	if updated {
+		newData, err :=
json.MarshalIndent(auths, "", "\t") + if err != nil { + return errors.Wrapf(err, "error marshaling JSON %q", path) + } + + if err = ioutil.WriteFile(path, newData, 0755); err != nil { + return errors.Wrapf(err, "error writing to file %q", path) + } + } + + return nil +} + +func getAuthFromCredHelper(credHelper, registry string) (string, string, error) { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + creds, err := helperclient.Get(p, registry) + if err != nil { + return "", "", err + } + return creds.Username, creds.Secret, nil +} + +func setAuthToCredHelper(credHelper, registry, username, password string) error { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + creds := &credentials.Credentials{ + ServerURL: registry, + Username: username, + Secret: password, + } + return helperclient.Store(p, creds) +} + +func deleteAuthFromCredHelper(credHelper, registry string) error { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + return helperclient.Erase(p, registry) +} + +// findAuthentication looks for auth of registry in path +func findAuthentication(registry, path string, legacyFormat bool) (string, string, error) { + auths, err := readJSONFile(path, legacyFormat) + if err != nil { + return "", "", errors.Wrapf(err, "error reading JSON file %q", path) + } + + // First try cred helpers. They should always be normalized. + if ch, exists := auths.CredHelpers[registry]; exists { + return getAuthFromCredHelper(ch, registry) + } + + // I'm feeling lucky + if val, exists := auths.AuthConfigs[registry]; exists { + return decodeDockerAuth(val.Auth) + } + + // bad luck; let's normalize the entries first + registry = normalizeRegistry(registry) + normalizedAuths := map[string]dockerAuthConfig{} + for k, v := range auths.AuthConfigs { + normalizedAuths[normalizeRegistry(k)] = v + } + if val, exists := normalizedAuths[registry]; exists { + return decodeDockerAuth(val.Auth) + } + return "", "", nil +} + +func decodeDockerAuth(s string) (string, string, error) { + decoded, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return "", "", err + } + parts := strings.SplitN(string(decoded), ":", 2) + if len(parts) != 2 { + // if it's invalid just skip, as docker does + return "", "", nil + } + user := parts[0] + password := strings.Trim(parts[1], "\x00") + return user, password, nil +} + +// convertToHostname converts a registry url which has http|https prepended +// to just an hostname. 
+// Copied from github.com/docker/docker/registry/auth.go +func convertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} + +func normalizeRegistry(registry string) string { + normalized := convertToHostname(registry) + switch normalized { + case "registry-1.docker.io", "docker.io": + return "index.docker.io" + } + return normalized +} diff --git a/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go new file mode 100644 index 00000000..0a32861c --- /dev/null +++ b/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go @@ -0,0 +1,102 @@ +package tlsclientconfig + +import ( + "crypto/tls" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc +func SetupCertificates(dir string, tlsc *tls.Config) error { + logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) + fs, err := ioutil.ReadDir(dir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + if os.IsPermission(err) { + logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) + return nil + } + return err + } + + for _, f := range fs { + fullPath := filepath.Join(dir, f.Name()) + if strings.HasSuffix(f.Name(), ".crt") { + systemPool, err := tlsconfig.SystemCertPool() + if err != nil { + return errors.Wrap(err, "unable to get system cert pool") + } + tlsc.RootCAs = systemPool + logrus.Debugf(" crt: %s", fullPath) + data, err := ioutil.ReadFile(fullPath) + if err != nil { + return err + } + tlsc.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf(" cert: %s", fullPath) + if !hasFile(fs, keyName) { + return errors.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) + if err != nil { + return err + } + tlsc.Certificates = append(tlsc.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf(" key: %s", fullPath) + if !hasFile(fs, certName) { + return errors.Errorf("missing client certificate %s for key %s", certName, keyName) + } + } + } + return nil +} + +func hasFile(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + +// NewTransport Creates a default transport +func NewTransport() *http.Transport { + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + tr := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + tr.Dial = proxyDialer.Dial + } + return tr +} diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go index bc6c5e9a..42cc12ab 100644 --- a/vendor/github.com/containers/image/signature/policy_config.go +++ b/vendor/github.com/containers/image/signature/policy_config.go @@ -70,7 +70,11 @@ func NewPolicyFromFile(fileName string) (*Policy, error) { if err != nil { return nil, err } - return NewPolicyFromBytes(contents) + policy, err := NewPolicyFromBytes(contents) + if err != nil { + return nil, errors.Wrapf(err, "invalid policy in %q", fileName) + } + return policy, nil } // NewPolicyFromBytes returns a policy parsed from the specified blob. 
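
The new pkg/docker/config package above centralizes registry credential handling behind a small set of operations. A short sketch of the intended call pattern, assuming a hypothetical registry name and a nil SystemContext (which selects the default auth.json location):

```
package main

import (
	"fmt"

	"github.com/containers/image/pkg/docker/config"
)

func main() {
	const registry = "registry.example.com" // hypothetical registry

	// Stores base64("user:pass") under "auths" in auth.json, or delegates
	// to a docker-credential-* helper if one is configured for the registry.
	if err := config.SetAuthentication(nil, registry, "user", "pass"); err != nil {
		panic(err)
	}

	// Lookup order per GetAuthentication: ctx.DockerAuthConfig, auth.json,
	// ~/.docker/config.json, and finally the legacy ~/.dockercfg format.
	username, _, err := config.GetAuthentication(nil, registry)
	if err != nil {
		panic(err)
	}
	fmt.Println("credentials stored for:", username)

	// Deletes the entry again; returns ErrNotLoggedIn if none exists.
	if err := config.RemoveAuthentication(nil, registry); err != nil {
		panic(err)
	}
}
```
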
diff --git a/vendor/github.com/containers/image/signature/signature.go b/vendor/github.com/containers/image/signature/signature.go
index f6219bec..41f13f72 100644
--- a/vendor/github.com/containers/image/signature/signature.go
+++ b/vendor/github.com/containers/image/signature/signature.go
@@ -180,13 +180,9 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
 	}
 	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
 
-	if err := paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+	return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
 		"docker-reference": &s.UntrustedDockerReference,
-	}); err != nil {
-		return err
-	}
-
-	return nil
+	})
 }
 
 // Sign formats the signature and returns a blob signed using mech and keyIdentity
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index 08fa71b5..038195c1 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -1,14 +1,17 @@
+// +build !containers_image_storage_stub
+
 package storage
 
 import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
-	"time"
-
-	"github.com/pkg/errors"
+	"os"
+	"path/filepath"
+	"sync/atomic"
 
 	"github.com/containers/image/image"
 	"github.com/containers/image/manifest"
@@ -16,10 +19,14 @@ import (
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/ioutils"
-	ddigest "github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
+const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+
 var (
 	// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
 	// with a digest-based name that doesn't match its contents.
@@ -27,8 +34,8 @@ var (
 	// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
 	// with an expected size that doesn't match the reader.
 	ErrBlobSizeMismatch = errors.New("blob size mismatch")
-	// ErrNoManifestLists is returned when GetTargetManifest() is
-	// called.
+	// ErrNoManifestLists is returned when GetManifest() is called
+	// with a non-nil instanceDigest.
 	ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
 	// ErrNoSuchImage is returned when we attempt to access an image which
 	// doesn't exist in the storage area.
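
One detail of the storage rewrite that follows is worth calling out: incoming blobs are now staged as files under temporaryDirectoryForBigFiles instead of under os.TempDir(). A minimal sketch of the rationale; the file prefix is an assumption for illustration:

```
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// Mirrors the constant above: on systemd hosts /tmp is commonly a RAM-backed
// tmpfs, so staging whole image layers there would consume memory, while
// /var/tmp is expected to be disk-backed.
const temporaryDirectoryForBigFiles = "/var/tmp"

func main() {
	f, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "storage-blob-") // prefix is hypothetical
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	fmt.Println("staging layer data at:", f.Name())
}
```
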
@@ -37,256 +44,318 @@ var ( type storageImageSource struct { imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice + ID string + layerPosition map[digest.Digest]int // Where we are in reading a blob's layers + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } type storageImageDestination struct { - imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary - Manifest []byte `json:"-"` // Manifest contents, temporary - Signatures []byte `json:"-"` // Signature contents, temporary - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice + image types.ImageCloser + systemContext *types.SystemContext + imageRef storageReference // The reference we'll use to name the image + publicRef storageReference // The reference we return when asked about the name we'll give to the image + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + signatures []byte // Signature contents, temporary + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } -type storageLayerMetadata struct { - Digest string `json:"digest,omitempty"` - Size int64 `json:"size"` - CompressedSize int64 `json:"compressed-size,omitempty"` -} - -type storageImage struct { - types.Image +type storageImageCloser struct { + types.ImageCloser size int64 } -// newImageSource sets us up to read out an image, which needs to already exist. +// newImageSource sets up an image for reading. func newImageSource(imageRef storageReference) (*storageImageSource, error) { + // First, locate the image. img, err := imageRef.resolveImage() if err != nil { return nil, err } + + // Build the reader object. 
image := &storageImageSource{ imageRef: imageRef, - Created: time.Now(), ID: img.ID, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - LayerPosition: make(map[ddigest.Digest]int), + layerPosition: make(map[digest.Digest]int), SignatureSizes: []int{}, } - if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { - return nil, errors.Wrap(err, "error decoding metadata for source image") - } - return image, nil -} - -// newImageDestination sets us up to write a new image. -func newImageDestination(imageRef storageReference) (*storageImageDestination, error) { - image := &storageImageDestination{ - imageRef: imageRef, - Tag: imageRef.reference, - Created: time.Now(), - ID: imageRef.id, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - BlobData: make(map[ddigest.Digest][]byte), - SignatureSizes: []int{}, + if img.Metadata != "" { + if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { + return nil, errors.Wrap(err, "error decoding metadata for source image") + } } return image, nil } +// Reference returns the image reference that we used to find this image. func (s storageImageSource) Reference() types.ImageReference { return s.imageRef } -func (s storageImageDestination) Reference() types.ImageReference { - return s.imageRef -} - +// Close cleans up any resources we tied up while reading the image. func (s storageImageSource) Close() error { return nil } -func (s storageImageDestination) Close() error { - return nil +// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given. +func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { + rc, n, _, err = s.getBlobAndLayerID(info) + return rc, n, err } +// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given. +func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { + var layer storage.Layer + var diffOptions *storage.DiffOptions + // We need a valid digest value. + err = info.Digest.Validate() + if err != nil { + return nil, -1, "", err + } + // Check if the blob corresponds to a diff that was used to initialize any layers. Our + // callers should try to retrieve layers using their uncompressed digests, so no need to + // check if they're using one of the compressed digests, which we can't reproduce anyway. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) + // If it's not a layer, then it must be a data item. + if len(layers) == 0 { + b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) + if err != nil { + return nil, -1, "", err + } + r := bytes.NewReader(b) + logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) + return ioutil.NopCloser(r), int64(r.Len()), "", nil + } + // Step through the list of matching layers. Tests may want to verify that if we have multiple layers + // which claim to have the same contents, that we actually do have multiple layers, otherwise we could + // just go ahead and use the first one every time. + i := s.layerPosition[info.Digest] + s.layerPosition[info.Digest] = i + 1 + if len(layers) > 0 { + layer = layers[i%len(layers)] + } + // Force the storage layer to not try to match any compression that was used when the layer was first + // handed to it.
+ noCompression := archive.Uncompressed + diffOptions = &storage.DiffOptions{ + Compression: &noCompression, + } + if layer.UncompressedSize < 0 { + n = -1 + } else { + n = layer.UncompressedSize + } + logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) + rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) + if err != nil { + return nil, -1, "", err + } + return rc, n, layer.ID, err +} + +// GetManifest() reads the image's manifest. +func (s *storageImageSource) GetManifest(instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { + if instanceDigest != nil { + return nil, "", ErrNoManifestLists + } + if len(s.cachedManifest) == 0 { + // We stored the manifest as an item named after storage.ImageDigestBigDataKey. + cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, storage.ImageDigestBigDataKey) + if err != nil { + return nil, "", err + } + s.cachedManifest = cachedBlob + } + return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err +} + +// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of +// the image, after they've been decompressed. +func (s *storageImageSource) LayerInfosForCopy() []types.BlobInfo { + simg, err := s.imageRef.transport.store.Image(s.ID) + if err != nil { + logrus.Errorf("error reading image %q: %v", s.ID, err) + return nil + } + updatedBlobInfos := []types.BlobInfo{} + layerID := simg.TopLayer + _, manifestType, err := s.GetManifest(nil) + if err != nil { + logrus.Errorf("error reading image manifest for %q: %v", s.ID, err) + return nil + } + uncompressedLayerType := "" + switch manifestType { + case imgspecv1.MediaTypeImageManifest: + uncompressedLayerType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + // This is actually a compressed type, but there's no uncompressed type defined + uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType + } + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + logrus.Errorf("error reading layer %q in image %q: %v", layerID, s.ID, err) + return nil + } + if layer.UncompressedDigest == "" { + logrus.Errorf("uncompressed digest for layer %q is unknown", layerID) + return nil + } + if layer.UncompressedSize < 0 { + logrus.Errorf("uncompressed size for layer %q is unknown", layerID) + return nil + } + blobInfo := types.BlobInfo{ + Digest: layer.UncompressedDigest, + Size: layer.UncompressedSize, + MediaType: uncompressedLayerType, + } + updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...) + layerID = layer.Parent + } + return updatedBlobInfos +} + +// GetSignatures() parses the image's signatures blob into a slice of byte slices. 
+func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) { + if instanceDigest != nil { + return nil, ErrNoManifestLists + } + var offset int + sigslice := [][]byte{} + signature := []byte{} + if len(s.SignatureSizes) > 0 { + signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures") + if err != nil { + return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.ID) + } + signature = signatureBlob + } + for _, length := range s.SignatureSizes { + sigslice = append(sigslice, signature[offset:offset+length]) + offset += length + } + if offset != len(signature) { + return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset) + } + return sigslice, nil +} + +// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until +// it's time to Commit() the image. +func newImageDestination(ctx *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { + directory, err := ioutil.TempDir(temporaryDirectoryForBigFiles, "storage") + if err != nil { + return nil, errors.Wrapf(err, "error creating a temporary directory") + } + // Break reading of the reference we're writing, so that copy.Image() won't try to rewrite + // schema1 image manifests to remove embedded references, since that changes the manifest's + // digest, and that makes the image unusable if we subsequently try to access it using a + // reference that mentions the no-longer-correct digest. + publicRef := imageRef + publicRef.name = nil + image := &storageImageDestination{ + systemContext: ctx, + imageRef: imageRef, + publicRef: publicRef, + directory: directory, + blobDiffIDs: make(map[digest.Digest]digest.Digest), + fileSizes: make(map[digest.Digest]int64), + filenames: make(map[digest.Digest]string), + SignatureSizes: []int{}, + } + return image, nil +} + +// Reference returns a mostly-usable image reference that can't return a DockerReference, to +// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to +// remove image names that they contain which don't match the value we're using. +func (s storageImageDestination) Reference() types.ImageReference { + return s.publicRef +} + +// Close cleans up the temporary directory. +func (s *storageImageDestination) Close() error { + return os.RemoveAll(s.directory) +} + +// ShouldCompressLayers indicates whether or not a caller should compress not-already-compressed +// data when handing it to us. func (s storageImageDestination) ShouldCompressLayers() bool { - // We ultimately have to decompress layers to populate trees on disk, - // so callers shouldn't bother compressing them before handing them to - // us, if they're not already compressed. + // We ultimately have to decompress layers to populate trees on disk, so callers shouldn't + // bother compressing them before handing them to us, if they're not already compressed. return false } -// putBlob stores a layer or data blob, optionally enforcing that a digest in -// blobinfo matches the incoming data. -func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) { - blobSize := blobinfo.Size - digest := blobinfo.Digest +// PutBlob stores a layer or data blob in our temporary directory, checking that any information +// in the blobinfo matches the incoming data.
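Before the real `PutBlob` below, here is a stdlib-only sketch of the single-pass pattern it relies on: the incoming stream is teed so that one digester sees the bytes exactly as received and a sink stores them (a temporary file in the real code, a buffer here), while a second digester sees the decompressed view to produce the DiffID. All names in the sketch are hypothetical:

```
package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	// A small gzip-compressed payload stands in for a layer blob.
	var blob bytes.Buffer
	zw := gzip.NewWriter(&blob)
	zw.Write([]byte("layer contents"))
	zw.Close()

	blobsum := sha256.New() // digest of the bytes as received
	var file bytes.Buffer   // stands in for the temporary file on disk
	reader := io.TeeReader(io.TeeReader(&blob, blobsum), &file)

	zr, err := gzip.NewReader(reader)
	if err != nil {
		panic(err)
	}
	diffID := sha256.New() // digest of the decompressed contents
	io.Copy(diffID, zr)
	io.Copy(ioutil.Discard, reader) // drain anything the decompressor left unread

	fmt.Printf("blobsum=%x diffID=%x stored=%d bytes\n",
		blobsum.Sum(nil), diffID.Sum(nil), file.Len())
}
```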
+func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { errorBlobInfo := types.BlobInfo{ Digest: "", Size: -1, } - // Try to read an initial snippet of the blob. - buf := [archive.HeaderSize]byte{} - n, err := io.ReadAtLeast(stream, buf[:], len(buf)) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return errorBlobInfo, err - } - // Set up to read the whole blob (the initial snippet, plus the rest) - // while digesting it with either the default, or the passed-in digest, - // if one was specified. - hasher := ddigest.Canonical.Digester() - if digest.Validate() == nil { - if a := digest.Algorithm(); a.Available() { + // Set up to digest the blob and count its size while saving it to a file. + hasher := digest.Canonical.Digester() + if blobinfo.Digest.Validate() == nil { + if a := blobinfo.Digest.Algorithm(); a.Available() { hasher = a.Digester() } } - hash := "" + diffID := digest.Canonical.Digester() + filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename) + } + defer file.Close() counter := ioutils.NewWriteCounter(hasher.Hash()) - defragmented := io.MultiReader(bytes.NewBuffer(buf[:n]), stream) - multi := io.TeeReader(defragmented, counter) - if (n > 0) && archive.IsArchive(buf[:n]) { - // It's a filesystem layer. If it's not the first one in the - // image, we assume that the most recently added layer is its - // parent. - parentLayer := "" - for _, blob := range s.BlobList { - if layerList, ok := s.Layers[blob.Digest]; ok { - parentLayer = layerList[len(layerList)-1] - } - } - // If we have an expected content digest, generate a layer ID - // based on the parent's ID and the expected content digest. - id := "" - if digest.Validate() == nil { - id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex() - } - // Attempt to create the identified layer and import its contents. - layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi) - if err != nil && errors.Cause(err) != storage.ErrDuplicateID { - logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err) - return errorBlobInfo, err - } - if errors.Cause(err) == storage.ErrDuplicateID { - // We specified an ID, and there's already a layer with - // the same ID. Drain the input so that we can look at - // its length and digest. - _, err := io.Copy(ioutil.Discard, multi) - if err != nil && err != io.EOF { - logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, id, err) - return errorBlobInfo, err - } - hash = hasher.Digest().String() - } else { - // Applied the layer with the specified ID. Note the - // size info and computed digest. - hash = hasher.Digest().String() - layerMeta := storageLayerMetadata{ - Digest: hash, - CompressedSize: counter.Count, - Size: uncompressedSize, - } - if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil { - s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata)) - } - // Hang on to the new layer's ID. - id = layer.ID - } - // Check if the size looks right. 
- if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count { - logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size) - if layer != nil { - // Something's wrong; delete the newly-created layer. - s.imageRef.transport.store.DeleteLayer(layer.ID) - } - return errorBlobInfo, ErrBlobSizeMismatch - } - // If the content digest was specified, verify it. - if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { - logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash) - if layer != nil { - // Something's wrong; delete the newly-created layer. - s.imageRef.transport.store.DeleteLayer(layer.ID) - } - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = counter.Count - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Record that this layer blob is a layer, and the layer ID it - // ended up having. This is a list, in case the same blob is - // being applied more than once. - s.Layers[digest] = append(s.Layers[digest], id) - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count}) - if layer != nil { - logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id) - } else { - logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id) - } - } else { - // It's just data. Finish scanning it in, check that our - // computed digest matches the passed-in digest, and store it, - // but leave it out of the blob-to-layer-ID map so that we can - // tell that it's not a layer. - blob, err := ioutil.ReadAll(multi) - if err != nil && err != io.EOF { - return errorBlobInfo, err - } - hash = hasher.Digest().String() - if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size { - logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size) - return errorBlobInfo, ErrBlobSizeMismatch - } - // If we were given a digest, verify that the content matches - // it. - if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { - logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash) - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = int64(len(blob)) - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Save the blob for when we Commit(). - s.BlobData[digest] = blob - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))}) - logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest) + reader := io.TeeReader(io.TeeReader(stream, counter), file) + decompressed, err := archive.DecompressStream(reader) + if err != nil { + return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob") + } + // Copy the data to the file. + _, err = io.Copy(diffID.Hash(), decompressed) + decompressed.Close() + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename) + } + // Ensure that any information that we were given about the blob is correct. 
+ if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() { + return errorBlobInfo, ErrBlobDigestMismatch + } + if blobinfo.Size >= 0 && blobinfo.Size != counter.Count { + return errorBlobInfo, ErrBlobSizeMismatch + } + // Record information about the blob. + s.blobDiffIDs[hasher.Digest()] = diffID.Digest() + s.fileSizes[hasher.Digest()] = counter.Count + s.filenames[hasher.Digest()] = filename + blobDigest := blobinfo.Digest + if blobDigest.Validate() != nil { + blobDigest = hasher.Digest() + } + blobSize := blobinfo.Size + if blobSize < 0 { + blobSize = counter.Count } return types.BlobInfo{ - Digest: digest, - Size: blobSize, + Digest: blobDigest, + Size: blobSize, + MediaType: blobinfo.MediaType, }, nil } -// PutBlob is used to both store filesystem layers and binary data that is part -// of the image. Filesystem layers are assumed to be imported in order, as -// that is required by some of the underlying storage drivers. -func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { - return s.putBlob(stream, blobinfo, true) -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. +// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be +// reapplied using ReapplyBlob. +// // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); // it returns a non-nil error only on an unexpected failure. @@ -294,93 +363,289 @@ func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, if blobinfo.Digest == "" { return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) } - for _, blob := range s.BlobList { - if blob.Digest == blobinfo.Digest { - return true, blob.Size, nil - } + if err := blobinfo.Digest.Validate(); err != nil { + return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`) } + // Check if we've already cached it in a file. + if size, ok := s.fileSizes[blobinfo.Digest]; ok { + return true, size, nil + } + // Check if we have a wasn't-compressed layer in storage that's based on that blob. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Save this for completeness. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, layers[0].UncompressedSize, nil + } + // Check if we have a was-compressed layer in storage that's based on that blob. + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, layers[0].CompressedSize, nil + } + // Nope, we don't have it. 
return false, -1, nil } +// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the +// same one when it walks the list in the manifest. func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) { - err := blobinfo.Digest.Validate() - if err != nil { - return types.BlobInfo{}, err + present, size, err := s.HasBlob(blobinfo) + if !present { + return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo) } - if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, blobinfo.Digest.String()) - if err != nil { - return types.BlobInfo{}, err + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo) + } + blobinfo.Size = size + return blobinfo, nil +} + +// computeID computes a recommended image ID based on information we have so far. If +// the manifest is not of a type that we recognize, we return an empty value, indicating +// that since we don't have a recommendation, a random ID should be used if one needs +// to be allocated. +func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // Build the diffID list. We need the decompressed sums that we've been calculating to + // fill in the DiffIDs. It's expected (but not enforced by us) that the number of + // diffIDs corresponds to the number of non-EmptyLayer entries in the history. + var diffIDs []digest.Digest + switch m.(type) { + case *manifest.Schema1: + // Build a list of the diffIDs we've generated for the non-throwaway FS layers, + // in reverse of the order in which they were originally listed. + s1, ok := m.(*manifest.Schema1) + if !ok { + // Shouldn't happen + logrus.Debugf("internal error reading schema 1 manifest") + return "" } - return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil + for i, history := range s1.History { + compat := manifest.Schema1V1Compatibility{} + if err := json.Unmarshal([]byte(history.V1Compatibility), &compat); err != nil { + logrus.Debugf("internal error reading schema 1 history: %v", err) + return "" + } + if compat.ThrowAway { + continue + } + blobSum := s1.FSLayers[i].BlobSum + diffID, ok := s.blobDiffIDs[blobSum] + if !ok { + logrus.Infof("error looking up diffID for layer %q", blobSum.String()) + return "" + } + diffIDs = append([]digest.Digest{diffID}, diffIDs...) + } + case *manifest.Schema2, *manifest.OCI1: + // We know the ID calculation for these formats doesn't actually use the diffIDs, + // so we don't need to populate the diffID list. } - layerList := s.Layers[blobinfo.Digest] - rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1]) + id, err := m.ImageID(diffIDs) if err != nil { - return types.BlobInfo{}, err + return "" } - return s.putBlob(rc, blobinfo, false) + return id +} + +// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig +// information out of it for Inspect(). +func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { + if info.Digest == "" { + return nil, errors.Errorf(`no digest supplied when reading blob`) + } + if err := info.Digest.Validate(); err != nil { + return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) + } + // Assume it's a file, since we're only calling this from a place that expects to read files. 
+ if filename, ok := s.filenames[info.Digest]; ok { + contents, err2 := ioutil.ReadFile(filename) + if err2 != nil { + return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename) + } + return contents, nil + } + // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. + return nil, errors.New("blob not found") } func (s *storageImageDestination) Commit() error { - // Create the image record. - lastLayer := "" - for _, blob := range s.BlobList { - if layerList, ok := s.Layers[blob.Digest]; ok { - lastLayer = layerList[len(layerList)-1] - } + // Find the list of layer blobs. + if len(s.manifest) == 0 { + return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") } - img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil) + man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) + if err != nil { + return errors.Wrapf(err, "error parsing manifest") + } + layerBlobs := man.LayerInfos() + // Extract or find the layers. + lastLayer := "" + addedLayers := []string{} + for _, blob := range layerBlobs { + var diff io.ReadCloser + // Check if there's already a layer with the ID that we'd give to the result of applying + // this layer blob to its parent, if it has one, or the blob's hex value otherwise. + diffID, haveDiffID := s.blobDiffIDs[blob.Digest] + if !haveDiffID { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), + // or to even check if we had it. + logrus.Debugf("looking for diffID for blob %+v", blob.Digest) + has, _, err := s.HasBlob(blob) + if err != nil { + return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) + } + if !has { + return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) + } + diffID, haveDiffID = s.blobDiffIDs[blob.Digest] + if !haveDiffID { + return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) + } + } + id := diffID.Hex() + if lastLayer != "" { + id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + } + if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { + // There's already a layer that should have the right contents, just reuse it. + lastLayer = layer.ID + continue + } + // Check if we cached a file with that blobsum. If we didn't already have a layer with + // the blob's contents, we should have gotten a copy. + if filename, ok := s.filenames[blob.Digest]; ok { + // Use the file's contents to initialize the layer. + file, err2 := os.Open(filename) + if err2 != nil { + return errors.Wrapf(err2, "error opening file %q", filename) + } + defer file.Close() + diff = file + } + if diff == nil { + // Try to find a layer with contents matching that blobsum. + layer := "" + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } else { + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } + } + if layer == "" { + return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest) + } + // Use the layer's contents to initialize the new layer. 
+ noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions) + if err2 != nil { + return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest) + } + defer diff.Close() + } + if diff == nil { + // This shouldn't have happened. + return errors.Errorf("error applying blob %q: content not found", blob.Digest) + } + // Build the new layer using the diff, regardless of where it came from. + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, diff) + if err != nil { + return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) + } + lastLayer = layer.ID + addedLayers = append([]string{lastLayer}, addedLayers...) + } + // If one of those blobs was a configuration blob, then we can try to dig out the date when the image + // was originally created, in case we're just copying it. If not, no harm done. + options := &storage.ImageOptions{} + if inspect, err := man.Inspect(s.getConfigBlob); err == nil { + logrus.Debugf("setting image creation date to %s", inspect.Created) + options.CreationDate = inspect.Created + } + if manifestDigest, err := manifest.Digest(s.manifest); err == nil { + options.Digest = manifestDigest + } + // Create the image record, pointing to the most-recently added layer. + intendedID := s.imageRef.id + if intendedID == "" { + intendedID = s.computeID(man) + } + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) if err != nil { if errors.Cause(err) != storage.ErrDuplicateID { logrus.Debugf("error creating image: %q", err) - return errors.Wrapf(err, "error creating image %q", s.ID) + return errors.Wrapf(err, "error creating image %q", intendedID) } - img, err = s.imageRef.transport.store.Image(s.ID) + img, err = s.imageRef.transport.store.Image(intendedID) if err != nil { - return errors.Wrapf(err, "error reading image %q", s.ID) + return errors.Wrapf(err, "error reading image %q", intendedID) } if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", s.ID) - return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", s.ID) + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) } logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) } else { logrus.Debugf("created new image ID %q", img.ID) } - s.ID = img.ID - names := img.Names - if s.Tag != "" { - names = append(names, s.Tag) + // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. + dataBlobs := make(map[digest.Digest]struct{}) + for blob := range s.filenames { + dataBlobs[blob] = struct{}{} } - // We have names to set, so move those names to this image. 
- if len(names) > 0 { + for _, layerBlob := range layerBlobs { + delete(dataBlobs, layerBlob.Digest) + } + for blob := range dataBlobs { + v, err := ioutil.ReadFile(s.filenames[blob]) + if err != nil { + return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) + return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) + } + } + // Set the reference's name on the image. + if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { + names := []string{} + if name != nil { + names = append(names, verboseName(name)) + } + if len(oldNames) > 0 { + names = append(names, oldNames...) + } if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } - logrus.Debugf("error setting names on image %q: %v", img.ID, err) - return err + logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) + return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) } logrus.Debugf("set names of image %q to %v", img.ID, names) } - // Save the data blobs to disk, and drop their contents from memory. - keys := []ddigest.Digest{} - for k, v := range s.BlobData { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err) - return err - } - keys = append(keys, k) - } - for _, key := range keys { - delete(s.BlobData, key) - } - // Save the manifest, if we have one. - if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil { + // Save the manifest. Use storage.ImageDigestBigDataKey as the item's + // name, so that its digest can be used to locate the image in the Store. + if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -388,12 +653,14 @@ func (s *storageImageDestination) Commit() error { return err } // Save the signatures, if we have any. - if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + if len(s.signatures) > 0 { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return err } - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return err } // Save our metadata. 
metadata, err := json.Marshal(s) @@ -405,7 +672,7 @@ func (s *storageImageDestination) Commit() error { return err } if len(metadata) != 0 { - if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil { + if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -418,7 +685,7 @@ func (s *storageImageDestination) Commit() error { } var manifestMIMETypes = []string{ - // TODO(runcom): we'll add OCI as part of another PR here + imgspecv1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, @@ -428,23 +695,20 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string { return manifestMIMETypes } -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +// PutManifest writes the manifest to the destination. func (s *storageImageDestination) PutManifest(manifest []byte) error { - s.Manifest = make([]byte, len(manifest)) - copy(s.Manifest, manifest) + s.manifest = make([]byte, len(manifest)) + copy(s.manifest, manifest) return nil } -// SupportsSignatures returns an error if we can't expect GetSignatures() to -// return data that was previously supplied to PutSignatures(). +// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was +// previously supplied to PutSignatures(). func (s *storageImageDestination) SupportsSignatures() error { return nil } -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be // uploaded to the image destination, true otherwise. func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { return false @@ -455,6 +719,7 @@ func (s *storageImageDestination) MustMatchRuntimeOS() bool { return true } +// PutSignatures records the image's signatures for committing as a single data blob. 
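One detail of `Commit()` above that is easy to miss is the deterministic layer-ID chain: the bottom layer's ID is simply its DiffID's hex, and every child's ID is the canonical digest of `<parent ID>+<diffID hex>`, so re-committing the same blobs reproduces the same IDs and lets existing layers be reused. A sketch with made-up inputs; only the chaining rule is taken from the code above:

```
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Hypothetical DiffIDs for a two-layer image.
	diffIDs := []digest.Digest{
		digest.Canonical.FromString("diff of layer 1"),
		digest.Canonical.FromString("diff of layer 2"),
	}
	lastLayer := ""
	for _, diffID := range diffIDs {
		id := diffID.Hex()
		if lastLayer != "" {
			// Same rule as Commit(): chain the parent ID with the DiffID.
			id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
		}
		fmt.Printf("layer id %s (parent %q)\n", id, lastLayer)
		lastLayer = id
	}
}
```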
func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { sizes := []int{} sigblob := []byte{} @@ -465,146 +730,73 @@ func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { copy(newblob[len(sigblob):], sig) sigblob = newblob } - s.Signatures = sigblob + s.signatures = sigblob s.SignatureSizes = sizes return nil } -func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { - rc, n, _, err = s.getBlobAndLayerID(info) - return rc, n, err -} - -func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { - err = info.Digest.Validate() - if err != nil { - return nil, -1, "", err - } - if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) - if err != nil { - return nil, -1, "", err - } - r := bytes.NewReader(b) - logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return ioutil.NopCloser(r), int64(r.Len()), "", nil - } - // If the blob was "put" more than once, we have multiple layer IDs - // which should all produce the same diff. For the sake of tests that - // want to make sure we created different layers each time the blob was - // "put", though, cycle through the layers. - layerList := s.Layers[info.Digest] - position, ok := s.LayerPosition[info.Digest] - if !ok { - position = 0 - } - s.LayerPosition[info.Digest] = (position + 1) % len(layerList) - logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest) - rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position]) - return rc, n, layerList[position], err -} - -func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) { - layer, err := store.Layer(layerID) - if err != nil { - return nil, -1, err - } - layerMeta := storageLayerMetadata{ - CompressedSize: -1, - } - if layer.Metadata != "" { - if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { - return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID) - } - } - if layerMeta.CompressedSize <= 0 { - n = -1 - } else { - n = layerMeta.CompressedSize - } - diff, err := store.Diff("", layer.ID, nil) - if err != nil { - return nil, -1, err - } - return diff, n, nil -} - -func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) { - manifestBlob, err = s.imageRef.transport.store.ImageBigData(s.ID, "manifest") - return manifestBlob, manifest.GuessMIMEType(manifestBlob), err -} - -func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) { - return nil, "", ErrNoManifestLists -} - -func (s *storageImageSource) GetSignatures(ctx context.Context) (signatures [][]byte, err error) { - var offset int - signature, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures") - if err != nil { - return nil, err - } - sigslice := [][]byte{} - for _, length := range s.SignatureSizes { - sigslice = append(sigslice, signature[offset:offset+length]) - offset += length - } - if offset != len(signature) { - return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset) - } - return sigslice, nil -} - +// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. 
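`PutSignatures` above and `GetSignatures` earlier in this file are two halves of one scheme: the signatures are flattened into a single "signatures" big-data blob, only their lengths are persisted as `SignatureSizes`, and reading back is pure re-slicing, with the final offset doubling as a consistency check. A self-contained round trip of that scheme:

```
package main

import (
	"bytes"
	"fmt"
)

func main() {
	signatures := [][]byte{[]byte("sig-one"), []byte("longer-sig-two")}

	// Pack: concatenate the signatures, recording each length.
	sizes := []int{}
	sigblob := []byte{}
	for _, sig := range signatures {
		sizes = append(sizes, len(sig))
		sigblob = append(sigblob, sig...)
	}

	// Unpack: walk the recorded lengths; the offset must land exactly at the end.
	offset := 0
	unpacked := [][]byte{}
	for _, length := range sizes {
		unpacked = append(unpacked, sigblob[offset:offset+length])
		offset += length
	}
	fmt.Println(offset == len(sigblob), bytes.Equal(unpacked[0], signatures[0]))
}
```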
func (s *storageImageSource) getSize() (int64, error) { var sum int64 - names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id) + // Size up the data blobs. + dataNames, err := s.imageRef.transport.store.ListImageBigData(s.ID) if err != nil { - return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id) + return -1, errors.Wrapf(err, "error reading image %q", s.ID) } - for _, name := range names { - bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.imageRef.id, name) + for _, dataName := range dataNames { + bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.ID, dataName) if err != nil { - return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id) + return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.ID) } sum += bigSize } + // Add the signature sizes. for _, sigSize := range s.SignatureSizes { sum += int64(sigSize) } - for _, layerList := range s.Layers { - for _, layerID := range layerList { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return -1, err - } - layerMeta := storageLayerMetadata{ - Size: -1, - } - if layer.Metadata != "" { - if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { - return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID) - } - } - if layerMeta.Size < 0 { - return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) - } - sum += layerMeta.Size + // Prepare to walk the layer list. + img, err := s.imageRef.transport.store.Image(s.ID) + if err != nil { + return -1, errors.Wrapf(err, "error reading image info %q", s.ID) + } + // Walk the layer list. + layerID := img.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return -1, err } + if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) + } + sum += layer.UncompressedSize + if layer.Parent == "" { + break + } + layerID = layer.Parent } return sum, nil } -func (s *storageImage) Size() (int64, error) { +// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) Size() (int64, error) { + return s.getSize() +} + +// Size() returns the previously-computed size of the image, with no error. 
+func (s *storageImageCloser) Size() (int64, error) { return s.size, nil } // newImage creates an image that also knows its size -func newImage(s storageReference) (types.Image, error) { +func newImage(ctx *types.SystemContext, s storageReference) (types.ImageCloser, error) { src, err := newImageSource(s) if err != nil { return nil, err } - img, err := image.FromSource(src) + img, err := image.FromSource(ctx, src) if err != nil { return nil, err } @@ -612,5 +804,5 @@ func newImage(s storageReference) (types.Image, error) { if err != nil { return nil, err } - return &storageImage{Image: img, size: size}, nil + return &storageImageCloser{ImageCloser: img, size: size}, nil } diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go index 674330b4..96887142 100644 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ b/vendor/github.com/containers/image/storage/storage_reference.go @@ -1,3 +1,5 @@ +// +build !containers_image_storage_stub + package storage import ( @@ -6,6 +8,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/types" "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -18,9 +21,11 @@ type storageReference struct { reference string id string name reference.Named + tag string + digest digest.Digest } -func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference { +func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference { // We take a copy of the transport, which contains a pointer to the // store that it used for resolving this reference, so that the // transport that we'll return from Transport() won't be affected by @@ -30,6 +35,8 @@ func newReference(transport storageTransport, reference, id string, name referen reference: reference, id: id, name: name, + tag: tag, + digest: digest, } } @@ -37,11 +44,32 @@ func newReference(transport storageTransport, reference, id string, name referen // one present with the same name or ID, and return the image. func (s *storageReference) resolveImage() (*storage.Image, error) { if s.id == "" { + // Look for an image that has the expanded reference name as an explicit Name value. image, err := s.transport.store.Image(s.reference) if image != nil && err == nil { s.id = image.ID } } + if s.id == "" && s.name != nil && s.digest != "" { + // Look for an image with the specified digest that has the same name, + // though possibly with a different tag or digest, as a Name value, so + // that the canonical reference can be implicitly resolved to the image. 
+ images, err := s.transport.store.ImagesByDigest(s.digest) + if images != nil && err == nil { + repo := reference.FamiliarName(reference.TrimNamed(s.name)) + search: + for _, image := range images { + for _, name := range image.Names { + if named, err := reference.ParseNormalizedNamed(name); err == nil { + if reference.FamiliarName(reference.TrimNamed(named)) == repo { + s.id = image.ID + break search + } + } + } + } + } + } if s.id == "" { logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport()) return nil, ErrNoSuchImage @@ -50,12 +78,15 @@ func (s *storageReference) resolveImage() (*storage.Image, error) { if err != nil { return nil, errors.Wrapf(err, "error reading image %q", s.id) } - if s.reference != "" { + if s.name != nil { + repo := reference.FamiliarName(reference.TrimNamed(s.name)) nameMatch := false for _, name := range img.Names { - if name == s.reference { - nameMatch = true - break + if named, err := reference.ParseNormalizedNamed(name); err == nil { + if reference.FamiliarName(reference.TrimNamed(named)) == repo { + nameMatch = true + break + } } } if !nameMatch { @@ -76,8 +107,21 @@ func (s storageReference) Transport() types.ImageTransport { } } -// Return a name with a tag, if we have a name to base them on. +// Return a name with a tag or digest, if we have either, else return it bare. func (s storageReference) DockerReference() reference.Named { + if s.name == nil { + return nil + } + if s.tag != "" { + if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil { + return namedTagged + } + } + if s.digest != "" { + if canonical, err := reference.WithDigest(s.name, s.digest); err == nil { + return canonical + } + } return s.name } @@ -91,7 +135,7 @@ func (s storageReference) StringWithinTransport() string { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.name == nil { + if s.reference == "" { return storeSpec + "@" + s.id } if s.id == "" { @@ -120,11 +164,8 @@ func (s storageReference) PolicyConfigurationNamespaces() []string { driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" namespaces := []string{} if s.name != nil { - if s.id != "" { - // The reference without the ID is also a valid namespace. - namespaces = append(namespaces, storeSpec+s.reference) - } - components := strings.Split(s.name.Name(), "/") + name := reference.TrimNamed(s.name) + components := strings.Split(name.String(), "/") for len(components) > 0 { namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) components = components[:len(components)-1] @@ -135,8 +176,13 @@ func (s storageReference) PolicyConfigurationNamespaces() []string { return namespaces } -func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - return newImage(s) +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
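The `PolicyConfigurationNamespaces` loop shown above peels a repository name one path component at a time, most specific first, before falling back to the driverless store spec. A sketch of the values it would produce for a typical name; the store paths below are assumptions for illustration, not values taken from the code:

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	storeSpec := "[overlay@/var/lib/containers/storage+/var/run/containers/storage]"
	components := strings.Split("docker.io/library/busybox", "/")
	for len(components) > 0 {
		// Most specific first: .../busybox, then .../library, then docker.io.
		fmt.Println(storeSpec + strings.Join(components, "/"))
		components = components[:len(components)-1]
	}
	// Finally, the driverless store spec (graph root only).
	fmt.Println("[/var/lib/containers/storage]")
}
```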
+func (s storageReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { + return newImage(ctx, s) } func (s storageReference) DeleteImage(ctx *types.SystemContext) error { @@ -154,10 +200,10 @@ func (s storageReference) DeleteImage(ctx *types.SystemContext) error { return err } -func (s storageReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { +func (s storageReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { return newImageSource(s) } func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(s) + return newImageDestination(ctx, s) } diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go index 1a0ebd04..f6ebcdc4 100644 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ b/vendor/github.com/containers/image/storage/storage_transport.go @@ -1,3 +1,5 @@ +// +build !containers_image_storage_stub + package storage import ( @@ -11,11 +13,14 @@ import ( "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" - "github.com/opencontainers/go-digest" - ddigest "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) +const ( + minimumTruncatedIDLength = 3 +) + func init() { transports.Register(Transport) } @@ -101,69 +106,133 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { // relative to the given store, and returns it in a reference object. func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { var name reference.Named - var sum digest.Digest - var err error if ref == "" { - return nil, ErrInvalidReference + return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref) } if ref[0] == '[' { // Ignore the store specifier. closeIndex := strings.IndexRune(ref, ']') if closeIndex < 1 { - return nil, ErrInvalidReference + return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) } ref = ref[closeIndex+1:] } - refInfo := strings.SplitN(ref, "@", 2) - if len(refInfo) == 1 { - // A name. - name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err + + // The last segment, if there's more than one, is either a digest from a reference, or an image ID. + split := strings.LastIndex(ref, "@") + idOrDigest := "" + if split != -1 { + // Peel off that last bit so that we can work on the rest. + idOrDigest = ref[split+1:] + if idOrDigest == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) } - } else if len(refInfo) == 2 { - // An ID, possibly preceded by a name. - if refInfo[0] != "" { - name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err - } - } - sum, err = digest.Parse(refInfo[1]) - if err != nil || sum.Validate() != nil { - sum, err = digest.Parse("sha256:" + refInfo[1]) - if err != nil || sum.Validate() != nil { - return nil, err - } - } - } else { // Coverage: len(refInfo) is always 1 or 2 - // Anything else: store specified in a form we don't - // recognize. - return nil, ErrInvalidReference + ref = ref[:split] } + + // The middle segment (now the last segment), if there is one, is a digest.
+ split = strings.LastIndex(ref, "@") + sum := digest.Digest("") + if split != -1 { + sum = digest.Digest(ref[split+1:]) + if sum == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) + } + ref = ref[:split] + } + + // If we have something that unambiguously should be a digest, validate it, and then the third part, + // if we have one, as an ID. + id := "" + if sum != "" { + if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest) + } + if err := sum.Validate(); err != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) + } + id = idOrDigest + if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) { + // The ID is a truncated version of the ID of an image that's present in local storage, + // so we might as well use the expanded value. + id = img.ID + } + } else if idOrDigest != "" { + // There was no middle portion, so the final portion could be either a digest or an ID. + if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil { + // It's an ID. + id = idOrDigest + } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil { + // It's a digest. + sum = idSum + } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) { + // It's a truncated version of the ID of an image that's present in local storage, + // and we may need the expanded value. + id = img.ID + } else { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) + } + } + + // If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's + // at least of what we guess is a reasonable minimum length, because we don't want a really short value + // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. + if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" { + if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) { + // It's a truncated version of the ID of an image that's present in local storage; + // we need to expand it. + id = img.ID + ref = "" + } + } + + // The initial portion is probably a name, possibly with a tag. + if ref != "" { + var err error + if name, err = reference.ParseNormalizedNamed(ref); err != nil { + return nil, errors.Wrapf(err, "error parsing named reference %q", ref) + } + } + if name == nil && sum == "" && id == "" { + return nil, errors.Errorf("error parsing reference") + } + + // Construct a copy of the store spec. optionsList := "" options := store.GraphOptions() if len(options) > 0 { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]" - id := "" - if sum.Validate() == nil { - id = sum.Hex() - } + + // Convert the name back into a reference string, if we got a name. 
refname := "" + tag := "" if name != nil { - name = reference.TagNameOnly(name) - refname = verboseName(name) + if sum.Validate() == nil { + canonical, err := reference.WithDigest(name, sum) + if err != nil { + return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum) + } + refname = verboseName(canonical) + } else { + name = reference.TagNameOnly(name) + tagged, ok := name.(reference.Tagged) + if !ok { + return nil, errors.Errorf("error parsing possibly-tagless name %q", ref) + } + refname = verboseName(name) + tag = tagged.Tag() + } } if refname == "" { - logrus.Debugf("parsed reference into %q", storeSpec+"@"+id) + logrus.Debugf("parsed reference to id into %q", storeSpec+"@"+id) } else if id == "" { - logrus.Debugf("parsed reference into %q", storeSpec+refname) + logrus.Debugf("parsed reference to refname into %q", storeSpec+refname) } else { - logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id) + logrus.Debugf("parsed reference to refname@id into %q", storeSpec+refname+"@"+id) } - return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil + return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil } func (s *storageTransport) GetStore() (storage.Store, error) { @@ -182,11 +251,14 @@ func (s *storageTransport) GetStore() (storage.Store, error) { return s.store, nil } -// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"), +// ParseReference takes a name and a tag or digest and/or ID +// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"), // possibly prefixed with a store specifier in the form "[_graphroot_]" or // "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or // "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", // tries to figure out which it is, and returns it in a reference object. +// If _id_ is the ID of an image that's present in local storage, it can be truncated, and +// even be specified as if it were a _name_, value. func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { var store storage.Store // Check if there's a store location prefix. 
If there is, then it @@ -265,17 +337,23 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { dref := ref.DockerReference() - if dref == nil { - if sref, ok := ref.(*storageReference); ok { - if sref.id != "" { - if img, err := store.Image(sref.id); err == nil { - return img, nil - } + if dref != nil { + if img, err := store.Image(verboseName(dref)); err == nil { + return img, nil + } + } + if sref, ok := ref.(*storageReference); ok { + if sref.id != "" { + if img, err := store.Image(sref.id); err == nil { + return img, nil } } - return nil, ErrInvalidReference + tmpRef := *sref + if img, err := tmpRef.resolveImage(); err == nil { + return img, nil + } } - return store.Image(verboseName(dref)) + return nil, storage.ErrImageUnknown } func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { @@ -335,7 +413,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { if err != nil { return err } - _, err = ddigest.Parse("sha256:" + scopeInfo[1]) + _, err = digest.Parse("sha256:" + scopeInfo[1]) if err != nil { return err } @@ -345,11 +423,28 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { return nil } -func verboseName(name reference.Named) string { - name = reference.TagNameOnly(name) - tag := "" - if tagged, ok := name.(reference.NamedTagged); ok { - tag = ":" + tagged.Tag() +func verboseName(r reference.Reference) string { + if r == nil { + return "" } - return name.Name() + tag + named, isNamed := r.(reference.Named) + digested, isDigested := r.(reference.Digested) + tagged, isTagged := r.(reference.Tagged) + name := "" + tag := "" + sum := "" + if isNamed { + name = (reference.TrimNamed(named)).String() + } + if isTagged { + if tagged.Tag() != "" { + tag = ":" + tagged.Tag() + } + } + if isDigested { + if digested.Digest().Validate() == nil { + sum = "@" + digested.Digest().String() + } + } + return name + tag + sum } diff --git a/vendor/github.com/containers/image/tarball/doc.go b/vendor/github.com/containers/image/tarball/doc.go new file mode 100644 index 00000000..a6ced5a0 --- /dev/null +++ b/vendor/github.com/containers/image/tarball/doc.go @@ -0,0 +1,48 @@ +// Package tarball provides a way to generate images using one or more layer +// tarballs and an optional template configuration. 
+// +// An example: +// package main +// +// import ( +// "fmt" +// +// cp "github.com/containers/image/copy" +// "github.com/containers/image/tarball" +// "github.com/containers/image/transports/alltransports" +// +// imgspecv1 "github.com/containers/image/transports/alltransports" +// ) +// +// func imageFromTarball() { +// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") +// // - or - +// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") +// if err != nil { +// panic(err) +// } +// updater, ok := src.(tarball.ConfigUpdater) +// if !ok { +// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater") +// } +// config := imgspecv1.Image{ +// Config: imgspecv1.ImageConfig{ +// Cmd: []string{"/bin/bash"}, +// }, +// } +// annotations := make(map[string]string) +// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache" +// err = updater.ConfigUpdate(config, annotations) +// if err != nil { +// panic(err) +// } +// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest") +// if err != nil { +// panic(err) +// } +// err = cp.Image(nil, dest, src, nil) +// if err != nil { +// panic(err) +// } +// } +package tarball diff --git a/vendor/github.com/containers/image/tarball/tarball_reference.go b/vendor/github.com/containers/image/tarball/tarball_reference.go new file mode 100644 index 00000000..4ccfb406 --- /dev/null +++ b/vendor/github.com/containers/image/tarball/tarball_reference.go @@ -0,0 +1,93 @@ +package tarball + +import ( + "fmt" + "os" + "strings" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/image" + "github.com/containers/image/types" + + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ConfigUpdater is an interface that ImageReferences for "tarball" images also +// implement. It can be used to set values for a configuration, and to set +// image annotations which will be present in the images returned by the +// reference's NewImage() or NewImageSource() methods. +type ConfigUpdater interface { + ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error +} + +type tarballReference struct { + transport types.ImageTransport + config imgspecv1.Image + annotations map[string]string + filenames []string + stdin []byte +} + +// ConfigUpdate updates the image's default configuration and adds annotations +// which will be visible in source images created using this reference. +func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error { + r.config = config + if r.annotations == nil { + r.annotations = make(map[string]string) + } + for k, v := range annotations { + r.annotations[k] = v + } + return nil +} + +func (r *tarballReference) Transport() types.ImageTransport { + return r.transport +} + +func (r *tarballReference) StringWithinTransport() string { + return strings.Join(r.filenames, ":") +} + +func (r *tarballReference) DockerReference() reference.Named { + return nil +} + +func (r *tarballReference) PolicyConfigurationIdentity() string { + return "" +} + +func (r *tarballReference) PolicyConfigurationNamespaces() []string { + return nil +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. 
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (r *tarballReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { + src, err := r.NewImageSource(ctx) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, src) + if err != nil { + src.Close() + return nil, err + } + return img, nil +} + +func (r *tarballReference) DeleteImage(ctx *types.SystemContext) error { + for _, filename := range r.filenames { + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error removing %q: %v", filename, err) + } + } + return nil +} + +func (r *tarballReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { + return nil, fmt.Errorf("destination not implemented yet") +} diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go new file mode 100644 index 00000000..8b5b496d --- /dev/null +++ b/vendor/github.com/containers/image/tarball/tarball_src.go @@ -0,0 +1,260 @@ +package tarball + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "github.com/containers/image/types" + + digest "github.com/opencontainers/go-digest" + imgspecs "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type tarballImageSource struct { + reference tarballReference + filenames []string + diffIDs []digest.Digest + diffSizes []int64 + blobIDs []digest.Digest + blobSizes []int64 + blobTypes []string + config []byte + configID digest.Digest + configSize int64 + manifest []byte +} + +func (r *tarballReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { + // Gather up the digests, sizes, and date information for all of the files. + filenames := []string{} + diffIDs := []digest.Digest{} + diffSizes := []int64{} + blobIDs := []digest.Digest{} + blobSizes := []int64{} + blobTimes := []time.Time{} + blobTypes := []string{} + for _, filename := range r.filenames { + var file *os.File + var err error + var blobSize int64 + var blobTime time.Time + var reader io.Reader + if filename == "-" { + blobSize = int64(len(r.stdin)) + blobTime = time.Now() + reader = bytes.NewReader(r.stdin) + } else { + file, err = os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) + } + defer file.Close() + reader = file + fileinfo, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("error reading size of %q: %v", filename, err) + } + blobSize = fileinfo.Size() + blobTime = fileinfo.ModTime() + } + + // Default to assuming the layer is compressed. + layerType := imgspecv1.MediaTypeImageLayerGzip + + // Set up to digest the file as it is. + blobIDdigester := digest.Canonical.Digester() + reader = io.TeeReader(reader, blobIDdigester.Hash()) + + // Set up to digest the file after we maybe decompress it. 
+ diffIDdigester := digest.Canonical.Digester() + uncompressed, err := gzip.NewReader(reader) + if err == nil { + // It is compressed, so the diffID is the digest of the uncompressed version + reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) + } else { + // It is not compressed, so the diffID and the blobID are going to be the same + diffIDdigester = blobIDdigester + layerType = imgspecv1.MediaTypeImageLayer + uncompressed = nil + } + n, err := io.Copy(ioutil.Discard, reader) + if err != nil { + return nil, fmt.Errorf("error reading %q: %v", filename, err) + } + if uncompressed != nil { + uncompressed.Close() + } + + // Grab our uncompressed and possibly-compressed digests and sizes. + filenames = append(filenames, filename) + diffIDs = append(diffIDs, diffIDdigester.Digest()) + diffSizes = append(diffSizes, n) + blobIDs = append(blobIDs, blobIDdigester.Digest()) + blobSizes = append(blobSizes, blobSize) + blobTimes = append(blobTimes, blobTime) + blobTypes = append(blobTypes, layerType) + } + + // Build the rootfs and history for the configuration blob. + rootfs := imgspecv1.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + created := time.Time{} + history := []imgspecv1.History{} + // Pick up the layer comment from the configuration's history list, if one is set. + comment := "imported from tarball" + if len(r.config.History) > 0 && r.config.History[0].Comment != "" { + comment = r.config.History[0].Comment + } + for i := range diffIDs { + createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) + history = append(history, imgspecv1.History{ + Created: &blobTimes[i], + CreatedBy: createdBy, + Comment: comment, + }) + // Use the mtime of the most recently modified file as the image's creation time. + if created.Before(blobTimes[i]) { + created = blobTimes[i] + } + } + + // Pick up other defaults from the config in the reference. + config := r.config + if config.Created == nil { + config.Created = &created + } + if config.Architecture == "" { + config.Architecture = runtime.GOARCH + } + if config.OS == "" { + config.OS = runtime.GOOS + } + config.RootFS = rootfs + config.History = history + + // Encode and digest the image configuration blob. + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) + } + configID := digest.Canonical.FromBytes(configBytes) + configSize := int64(len(configBytes)) + + // Populate a manifest with the configuration blob and the file as the single layer. + layerDescriptors := []imgspecv1.Descriptor{} + for i := range blobIDs { + layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ + Digest: blobIDs[i], + Size: blobSizes[i], + MediaType: blobTypes[i], + }) + } + annotations := make(map[string]string) + for k, v := range r.annotations { + annotations[k] = v + } + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{ + SchemaVersion: 2, + }, + Config: imgspecv1.Descriptor{ + Digest: configID, + Size: configSize, + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: layerDescriptors, + Annotations: annotations, + } + + // Encode the manifest. + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) + } + + // Return the image. 
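The compression probe above relies on gzip.NewReader failing fast on non-gzip input, which is what lets a single pass over the stream feed both digesters. A self-contained sketch of just the probe, mapping a blob to the two layer media types used here (the sample data is made up):

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"

        imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    )

    // layerMediaType probes data with gzip.NewReader: success means a
    // compressed layer, an error means the bytes are a plain tar layer.
    func layerMediaType(data []byte) string {
        if _, err := gzip.NewReader(bytes.NewReader(data)); err == nil {
            return imgspecv1.MediaTypeImageLayerGzip
        }
        return imgspecv1.MediaTypeImageLayer
    }

    func main() {
        var compressed bytes.Buffer
        zw := gzip.NewWriter(&compressed)
        zw.Write([]byte("layer contents"))
        zw.Close()

        fmt.Println(layerMediaType(compressed.Bytes()))        // ...layer.v1.tar+gzip
        fmt.Println(layerMediaType([]byte("plain tar bytes"))) // ...layer.v1.tar
    }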
+ src := &tarballImageSource{ + reference: *r, + filenames: filenames, + diffIDs: diffIDs, + diffSizes: diffSizes, + blobIDs: blobIDs, + blobSizes: blobSizes, + blobTypes: blobTypes, + config: configBytes, + configID: configID, + configSize: configSize, + manifest: manifestBytes, + } + + return src, nil +} + +func (is *tarballImageSource) Close() error { + return nil +} + +func (is *tarballImageSource) GetBlob(blobinfo types.BlobInfo) (io.ReadCloser, int64, error) { + // We should only be asked about things in the manifest. Maybe the configuration blob. + if blobinfo.Digest == is.configID { + return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + } + // Maybe one of the layer blobs. + for i := range is.blobIDs { + if blobinfo.Digest == is.blobIDs[i] { + // We want to read that layer: open the file or memory block and hand it back. + if is.filenames[i] == "-" { + return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + } + reader, err := os.Open(is.filenames[i]) + if err != nil { + return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) + } + return reader, is.blobSizes[i], nil + } + } + return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (is *tarballImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return is.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return nil, nil +} + +func (is *tarballImageSource) Reference() types.ImageReference { + return &is.reference +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
+func (*tarballImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/tarball/tarball_transport.go b/vendor/github.com/containers/image/tarball/tarball_transport.go new file mode 100644 index 00000000..72558b5e --- /dev/null +++ b/vendor/github.com/containers/image/tarball/tarball_transport.go @@ -0,0 +1,66 @@ +package tarball + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/containers/image/transports" + "github.com/containers/image/types" +) + +const ( + transportName = "tarball" + separator = ":" +) + +var ( + // Transport implements the types.ImageTransport interface for "tarball:" images, + // which are makeshift images constructed using one or more possibly-compressed tar + // archives. + Transport = &tarballTransport{} +) + +type tarballTransport struct { +} + +func (t *tarballTransport) Name() string { + return transportName +} + +func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { + var stdin []byte + var err error + filenames := strings.Split(reference, separator) + for _, filename := range filenames { + if filename == "-" { + stdin, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("error buffering stdin: %v", err) + } + continue + } + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q: %v", filename, err) + } + f.Close() + } + ref := &tarballReference{ + transport: t, + filenames: filenames, + stdin: stdin, + } + return ref, nil +} + +func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + return errors.New(`tarball: does not support any scopes except the default "" one`) +} + +func init() { + transports.Register(Transport) +} diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/transports/alltransports/alltransports.go index 4279b9d2..b4552df6 100644 --- a/vendor/github.com/containers/image/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/transports/alltransports/alltransports.go @@ -13,8 +13,9 @@ import ( _ "github.com/containers/image/oci/archive" _ "github.com/containers/image/oci/layout" _ "github.com/containers/image/openshift" + _ "github.com/containers/image/tarball" // The ostree transport is registered by ostree*.go - _ "github.com/containers/image/storage" + // The storage transport is registered by storage*.go "github.com/containers/image/transports" "github.com/containers/image/types" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/image/transports/alltransports/storage.go b/vendor/github.com/containers/image/transports/alltransports/storage.go new file mode 100644 index 00000000..a867c664 --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/storage.go @@ -0,0 +1,8 @@ +// +build !containers_image_storage_stub + +package alltransports + +import ( + // Register the storage transport + _ "github.com/containers/image/storage" +) diff --git a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go new file mode 100644 index 00000000..4ac684e5 --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go @@ -0,0 +1,9 @@ +// +build containers_image_storage_stub + +package alltransports + +import 
"github.com/containers/image/transports" + +func init() { + transports.Register(transports.NewStubTransport("containers-storage")) +} diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index 78afe09f..2e9c7105 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -73,16 +73,15 @@ type ImageReference interface { // and each following element to be a prefix of the element preceding it. PolicyConfigurationNamespaces() []string - // NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. - // The caller must call .Close() on the returned Image. + // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. + // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. - NewImage(ctx *SystemContext) (Image, error) - // NewImageSource returns a types.ImageSource for this reference, - // asking the backend to use a manifest from requestedManifestMIMETypes if possible. - // nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. + // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. + NewImage(ctx *SystemContext) (ImageCloser, error) + // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. - NewImageSource(ctx *SystemContext, requestedManifestMIMETypes []string) (ImageSource, error) + NewImageSource(ctx *SystemContext) (ImageSource, error) // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. NewImageDestination(ctx *SystemContext) (ImageDestination, error) @@ -98,9 +97,10 @@ type BlobInfo struct { Size int64 // -1 if unknown URLs []string Annotations map[string]string + MediaType string } -// ImageSource is a service, possibly remote (= slow), to download components of a single image. +// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list). // This is primarily useful for copying images around; for examining their properties, Image (below) // is usually more useful. // Each ImageSource should eventually be closed by calling Close(). @@ -115,15 +115,21 @@ type ImageSource interface { Close() error // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. - GetManifest() ([]byte, string, error) - // GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest - // out of a manifest list. - GetTargetManifest(digest digest.Digest) ([]byte, string, error) + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); + // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
+ GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). - // The Digest field in BlobInfo is guaranteed to be provided; Size may be -1. + // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. GetBlob(BlobInfo) (io.ReadCloser, int64, error) // GetSignatures returns the image's signatures. It may use a remote (= slow) service. - GetSignatures(context.Context) ([][]byte, error) + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy() []BlobInfo } // ImageDestination is a service, possibly remote (= slow), to store components of a single image. @@ -155,9 +161,10 @@ type ImageDestination interface { AcceptsForeignLayerURLs() bool // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. MustMatchRuntimeOS() bool - // PutBlob writes contents of stream and returns data representing the result (with all data filled in). + // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. + // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. @@ -196,28 +203,35 @@ func (e ManifestTypeRejectedError) Error() string { // Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them, // allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else. // This also makes the UnparsedImage→Image conversion an explicitly visible step. -// Each UnparsedImage should eventually be closed by calling Close(). +// +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. type UnparsedImage interface { // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. Reference() ImageReference - // Close removes resources associated with an initialized UnparsedImage, if any. 
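The reworked ImageSource above folds the old GetTargetManifest into GetManifest and threads the same optional instance digest through GetSignatures: nil selects the primary manifest, a non-nil digest selects one entry of a manifest list. A hedged sketch of the new call pattern — fetchManifests and its arguments are illustrative; only the method signatures come from the interface above:

    package manifestsketch

    import (
        "fmt"

        "github.com/containers/image/types"
        digest "github.com/opencontainers/go-digest"
    )

    // fetchManifests exercises the updated GetManifest contract against any
    // types.ImageSource implementation.
    func fetchManifests(src types.ImageSource, instance *digest.Digest) error {
        // nil: the primary manifest, which may itself be a manifest list.
        primary, mime, err := src.GetManifest(nil)
        if err != nil {
            return err
        }
        fmt.Printf("primary manifest: %d bytes (%s)\n", len(primary), mime)

        // non-nil: one instance of a manifest list (the old GetTargetManifest).
        if instance != nil {
            one, _, err := src.GetManifest(instance)
            if err != nil {
                return err
            }
            fmt.Printf("instance %s: %d bytes\n", *instance, len(one))
        }
        return nil
    }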
- Close() error // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. Manifest() ([]byte, string, error) // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. Signatures(ctx context.Context) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. + // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy() []BlobInfo } // Image is the primary API for inspecting properties of images. -// Each Image should eventually be closed by calling Close(). +// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// The Image must not be used after the underlying ImageSource is Close()d. type Image interface { // Note that Reference may return nil in the return value of UpdatedImage! UnparsedImage // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. ConfigInfo() BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. + // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise. // The result is cached; it is OK to call this however often you need. ConfigBlob() ([]byte, error) // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about @@ -225,7 +239,7 @@ type Image interface { // old image manifests work (docker v2s1 especially). OCIConfig() (*v1.Image, error) // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. + // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // WARNING: The list may contain duplicates, and they are semantically relevant. LayerInfos() []BlobInfo // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -242,16 +256,23 @@ type Image interface { // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired. // This does not change the state of the original Image object. UpdatedImage(options ManifestUpdateOptions) (Image, error) - // IsMultiImage returns true if the image's manifest is a list of images, false otherwise. - IsMultiImage() bool // Size returns an approximation of the amount of disk space which is consumed by the image in its current // location. If the size is not known, -1 will be returned. Size() (int64, error) } +// ImageCloser is an Image with a Close() method which must be called by the user. +// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource, +// to ensure that the ImageSource is closed. +type ImageCloser interface { + Image + // Close removes resources associated with an initialized ImageCloser. 
+ Close() error +} + // ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest type ManifestUpdateOptions struct { - LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls) which should replace the originals, in order (the root layer first, and then successive layered layers) + LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. EmbeddedDockerReference reference.Named ManifestMIMEType string // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. @@ -285,7 +306,7 @@ type DockerAuthConfig struct { Password string } -// SystemContext allows parametrizing access to implicitly-accessed resources, +// SystemContext allows parameterizing access to implicitly-accessed resources, // like configuration files in /etc and users' login state in their home directory. // Various components can share the same field only if their semantics is exactly // the same; if in doubt, add a new field. @@ -304,6 +325,24 @@ type SystemContext struct { SignaturePolicyPath string // If not "", overrides the system's default path for registries.d (Docker signature storage configuration) RegistriesDirPath string + // Path to the system-wide registries configuration file + SystemRegistriesConfPath string + // If not "", overrides the default path for the authentication file + AuthFilePath string + // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match. + ArchitectureChoice string + // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. + OSChoice string + + // === OCI.Transport overrides === + // If not "", a directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client ceritificate key + // (ending with ".key") used when downloading OCI image layers. + OCICertPath string + // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + OCIInsecureSkipTLSVerify bool + // If not "", use a shared directory for storing blobs rather than within OCI layouts + OCISharedBlobDirPath string // === docker.Transport overrides === // If not "", a directory containing a CA certificate (ending with ".crt"), @@ -312,8 +351,9 @@ type SystemContext struct { DockerCertPath string // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above. // Ignored if DockerCertPath is non-empty. - DockerPerHostCertDirPath string - DockerInsecureSkipTLSVerify bool // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + DockerPerHostCertDirPath string + // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + DockerInsecureSkipTLSVerify bool // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials DockerAuthConfig *DockerAuthConfig // if not "", an User-Agent header is added to each request when contacting a registry. 
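The new SystemContext knobs are plain struct members; a minimal sketch of wiring up a few of the fields added in the hunk above (all values are placeholders, not recommendations):

    package main

    import "github.com/containers/image/types"

    func main() {
        // Zero values keep the library defaults documented above.
        ctx := &types.SystemContext{
            ArchitectureChoice:          "arm64",          // overrides runtime.GOARCH when selecting images
            OSChoice:                    "linux",          // overrides runtime.GOOS
            AuthFilePath:                "/tmp/auth.json", // overrides the default authentication file
            OCIInsecureSkipTLSVerify:    true,             // OCI transport: allow HTTP / unverified TLS
            DockerInsecureSkipTLSVerify: true,             // docker transport: allow HTTP / unverified TLS
        }
        _ = ctx
    }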
@@ -324,6 +364,20 @@ type SystemContext struct { DockerDisableV1Ping bool // Directory to use for OSTree temporary files OSTreeTmpDirPath string + + // === docker/daemon.Transport overrides === + // A directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when talking to a Docker daemon. + DockerDaemonCertPath string + // The hostname or IP to the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed. + DockerDaemonHost string + // Used to skip TLS verification, off by default. To take effect DockerDaemonCertPath needs to be specified as well. + DockerDaemonInsecureSkipTLSVerify bool + + // === dir.Transport overrides === + // DirForceCompress compresses the image layers if set to true + DirForceCompress bool } // ProgressProperties is used to pass information from the copy code to a monitor which diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index d5bae3b0..f3634b38 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -1,5 +1,5 @@ github.com/sirupsen/logrus v1.0.0 -github.com/containers/storage 47536c89fcc545a87745e1a1573addc439409165 +github.com/containers/storage master github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 @@ -22,7 +22,7 @@ github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302 github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 -github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721 +github.com/vbatts/tar-split v0.10.2 golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8 golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08 @@ -36,4 +36,5 @@ github.com/tchap/go-patricia v2.2.6 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0 github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 -github.com/gogo/protobuf/proto fcdc5011193ff531a548e9b0301828d5a5b97fd8 +github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8 +github.com/pquerna/ffjson master diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index 0908bdd1..5631e31c 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -2,7 +2,6 @@ package storage import ( "encoding/json" - "errors" "io/ioutil" "os" "path/filepath" @@ -11,11 +10,8 @@ import ( "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" -) - -var ( - // ErrContainerUnknown indicates that there was no container with the specified name or ID - ErrContainerUnknown = errors.New("container not known") + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" ) // A Container is a reference to a read-write layer with metadata. @@ -50,6 +46,10 @@ type Container struct { // that has been stored, if they're known. 
BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + // BigDataDigests maps the names in BigDataNames to the digests of the + // data that has been stored, if they're known. + BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` + // Created is the datestamp for when this container was created. Older // versions of the library did not track this information, so callers // will likely want to use the IsZero() method to verify that a value @@ -139,6 +139,7 @@ func (r *containerStore) Load() error { ids := make(map[string]*Container) names := make(map[string]*Container) if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(containers)) for n, container := range containers { idlist = append(idlist, container.ID) ids[container.ID] = containers[n] @@ -229,6 +230,9 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro if !ok { return ErrContainerUnknown } + if container.Flags == nil { + container.Flags = make(map[string]interface{}) + } container.Flags[flag] = value return r.Save() } @@ -245,6 +249,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat if _, idInUse := r.byid[id]; idInUse { return nil, ErrDuplicateID } + names = dedupeNames(names) for _, name := range names { if _, nameInUse := r.byname[name]; nameInUse { return nil, ErrDuplicateName @@ -252,15 +257,16 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat } if err == nil { container = &Container{ - ID: id, - Names: names, - ImageID: image, - LayerID: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - Created: time.Now().UTC(), - Flags: make(map[string]interface{}), + ID: id, + Names: names, + ImageID: image, + LayerID: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: time.Now().UTC(), + Flags: make(map[string]interface{}), } r.containers = append(r.containers, container) r.byid[id] = container @@ -294,6 +300,7 @@ func (r *containerStore) removeName(container *Container, name string) { } func (r *containerStore) SetNames(id string, names []string) error { + names = dedupeNames(names) if container, ok := r.lookup(id); ok { for _, name := range container.Names { delete(r.byname, name) @@ -366,6 +373,9 @@ func (r *containerStore) Exists(id string) bool { } func (r *containerStore) BigData(id, key string) ([]byte, error) { + if key == "" { + return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name") + } c, ok := r.lookup(id) if !ok { return nil, ErrContainerUnknown @@ -374,16 +384,61 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) { } func (r *containerStore) BigDataSize(id, key string) (int64, error) { + if key == "" { + return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name") + } c, ok := r.lookup(id) if !ok { return -1, ErrContainerUnknown } + if c.BigDataSizes == nil { + c.BigDataSizes = make(map[string]int64) + } if size, ok := c.BigDataSizes[key]; ok { return size, nil } + if data, err := r.BigData(id, key); err == nil && data != nil { + if r.SetBigData(id, key, data) == nil { + c, ok := r.lookup(id) + if !ok { + return -1, ErrContainerUnknown + } + if size, ok := c.BigDataSizes[key]; ok { + return size, nil + } + } + } return -1, ErrSizeUnknown } +func (r *containerStore) 
BigDataDigest(id, key string) (digest.Digest, error) { + if key == "" { + return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name") + } + c, ok := r.lookup(id) + if !ok { + return "", ErrContainerUnknown + } + if c.BigDataDigests == nil { + c.BigDataDigests = make(map[string]digest.Digest) + } + if d, ok := c.BigDataDigests[key]; ok { + return d, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + if r.SetBigData(id, key, data) == nil { + c, ok := r.lookup(id) + if !ok { + return "", ErrContainerUnknown + } + if d, ok := c.BigDataDigests[key]; ok { + return d, nil + } + } + } + return "", ErrDigestUnknown +} + func (r *containerStore) BigDataNames(id string) ([]string, error) { c, ok := r.lookup(id) if !ok { @@ -393,6 +448,9 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) { } func (r *containerStore) SetBigData(id, key string, data []byte) error { + if key == "" { + return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item") + } c, ok := r.lookup(id) if !ok { return ErrContainerUnknown @@ -403,19 +461,28 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error { err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600) if err == nil { save := false - oldSize, ok := c.BigDataSizes[key] + if c.BigDataSizes == nil { + c.BigDataSizes = make(map[string]int64) + } + oldSize, sizeOk := c.BigDataSizes[key] c.BigDataSizes[key] = int64(len(data)) - if !ok || oldSize != c.BigDataSizes[key] { + if c.BigDataDigests == nil { + c.BigDataDigests = make(map[string]digest.Digest) + } + oldDigest, digestOk := c.BigDataDigests[key] + newDigest := digest.Canonical.FromBytes(data) + c.BigDataDigests[key] = newDigest + if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest { save = true } - add := true + addName := true for _, name := range c.BigDataNames { if name == key { - add = false + addName = false break } } - if add { + if addName { c.BigDataNames = append(c.BigDataNames, key) save = true } @@ -427,7 +494,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error { } func (r *containerStore) Wipe() error { - ids := []string{} + ids := make([]string, 0, len(r.byid)) for id := range r.byid { ids = append(ids, id) } diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go new file mode 100644 index 00000000..95261980 --- /dev/null +++ b/vendor/github.com/containers/storage/containers_ffjson.go @@ -0,0 +1,1194 @@ +// Code generated by ffjson . DO NOT EDIT. 
+// source: containers.go + +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/opencontainers/go-digest" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +// MarshalJSON marshal bytes to json - template +func (j *Container) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *Container) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteByte(',') + if len(j.Names) != 0 { + buf.WriteString(`"names":`) + if j.Names != nil { + buf.WriteString(`[`) + for i, v := range j.Names { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.WriteString(`"image":`) + fflib.WriteJsonString(buf, string(j.ImageID)) + buf.WriteString(`,"layer":`) + fflib.WriteJsonString(buf, string(j.LayerID)) + buf.WriteByte(',') + if len(j.Metadata) != 0 { + buf.WriteString(`"metadata":`) + fflib.WriteJsonString(buf, string(j.Metadata)) + buf.WriteByte(',') + } + if len(j.BigDataNames) != 0 { + buf.WriteString(`"big-data-names":`) + if j.BigDataNames != nil { + buf.WriteString(`[`) + for i, v := range j.BigDataNames { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.BigDataSizes) != 0 { + if j.BigDataSizes == nil { + buf.WriteString(`"big-data-sizes":null`) + } else { + buf.WriteString(`"big-data-sizes":{ `) + for key, value := range j.BigDataSizes { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.FormatBits2(buf, uint64(value), 10, value < 0) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if len(j.BigDataDigests) != 0 { + if j.BigDataDigests == nil { + buf.WriteString(`"big-data-digests":null`) + } else { + buf.WriteString(`"big-data-digests":{ `) + for key, value := range j.BigDataDigests { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.WriteJsonString(buf, string(value)) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if true { + buf.WriteString(`"created":`) + + { + + obj, err = j.Created.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + if len(j.Flags) != 0 { + buf.WriteString(`"flags":`) + /* Falling back. 
type=map[string]interface {} kind=map */ + err = buf.Encode(j.Flags) + if err != nil { + return err + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffjtContainerbase = iota + ffjtContainernosuchkey + + ffjtContainerID + + ffjtContainerNames + + ffjtContainerImageID + + ffjtContainerLayerID + + ffjtContainerMetadata + + ffjtContainerBigDataNames + + ffjtContainerBigDataSizes + + ffjtContainerBigDataDigests + + ffjtContainerCreated + + ffjtContainerFlags +) + +var ffjKeyContainerID = []byte("id") + +var ffjKeyContainerNames = []byte("names") + +var ffjKeyContainerImageID = []byte("image") + +var ffjKeyContainerLayerID = []byte("layer") + +var ffjKeyContainerMetadata = []byte("metadata") + +var ffjKeyContainerBigDataNames = []byte("big-data-names") + +var ffjKeyContainerBigDataSizes = []byte("big-data-sizes") + +var ffjKeyContainerBigDataDigests = []byte("big-data-digests") + +var ffjKeyContainerCreated = []byte("created") + +var ffjKeyContainerFlags = []byte("flags") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *Container) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *Container) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtContainernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'b': + + if bytes.Equal(ffjKeyContainerBigDataNames, kn) { + currentKey = ffjtContainerBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyContainerBigDataSizes, kn) { + currentKey = ffjtContainerBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyContainerBigDataDigests, kn) { + currentKey = ffjtContainerBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffjKeyContainerCreated, kn) { + currentKey = ffjtContainerCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffjKeyContainerFlags, kn) { + currentKey = ffjtContainerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeyContainerID, kn) { + currentKey = ffjtContainerID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyContainerImageID, kn) { + currentKey = ffjtContainerImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffjKeyContainerLayerID, kn) { + currentKey = ffjtContainerLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffjKeyContainerMetadata, kn) { + currentKey = ffjtContainerMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffjKeyContainerNames, kn) { + currentKey = ffjtContainerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyContainerFlags, kn) { + currentKey = ffjtContainerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerCreated, kn) { + currentKey = ffjtContainerCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerBigDataDigests, kn) { + currentKey = ffjtContainerBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerBigDataSizes, kn) { + currentKey = ffjtContainerBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerBigDataNames, kn) { + currentKey = ffjtContainerBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerMetadata, kn) { + currentKey = ffjtContainerMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerLayerID, kn) { + currentKey = ffjtContainerLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerImageID, kn) { + currentKey = ffjtContainerImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerNames, kn) { + currentKey = ffjtContainerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerID, kn) { + currentKey = ffjtContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtContainernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || 
tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtContainerID: + goto handle_ID + + case ffjtContainerNames: + goto handle_Names + + case ffjtContainerImageID: + goto handle_ImageID + + case ffjtContainerLayerID: + goto handle_LayerID + + case ffjtContainerMetadata: + goto handle_Metadata + + case ffjtContainerBigDataNames: + goto handle_BigDataNames + + case ffjtContainerBigDataSizes: + goto handle_BigDataSizes + + case ffjtContainerBigDataDigests: + goto handle_BigDataDigests + + case ffjtContainerCreated: + goto handle_Created + + case ffjtContainerFlags: + goto handle_Flags + + case ffjtContainernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Names: + + /* handler: j.Names type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Names = nil + } else { + + j.Names = []string{} + + wantVal := true + + for { + + var tmpJNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJNames = string(string(outBuf)) + + } + } + + j.Names = append(j.Names, tmpJNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ImageID: + + /* handler: j.ImageID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ImageID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LayerID: + + /* handler: j.LayerID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.LayerID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Metadata: + + /* handler: j.Metadata type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Metadata = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataNames: + + /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataNames = nil + } else { + + j.BigDataNames = []string{} + + wantVal := true + + for { + + var tmpJBigDataNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJBigDataNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataNames = string(string(outBuf)) + + } + } + + j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataSizes: + + /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataSizes = nil + } else { + + j.BigDataSizes = make(map[string]int64, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataSizes int64 + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + tmpJBigDataSizes = int64(tval) + + } + } + + j.BigDataSizes[k] = tmpJBigDataSizes + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataDigests: + + /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataDigests = nil + } else { + + j.BigDataDigests = make(map[string]digest.Digest, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataDigests digest.Digest + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataDigests = digest.Digest(string(outBuf)) + + } + } + + j.BigDataDigests[k] = tmpJBigDataDigests + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Created: + + /* handler: j.Created type=time.Time kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + } else { + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = j.Created.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Flags: + + /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Flags = nil + } else { + + j.Flags = make(map[string]interface{}, 0) + + wantVal := true + + for { + + var k string + + var tmpJFlags interface{} + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ + + { + /* Falling back. 
type=interface {} kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJFlags) + if err != nil { + return fs.WrapErr(err) + } + } + + j.Flags[k] = tmpJFlags + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *containerStore) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *containerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtcontainerStorebase = iota + ffjtcontainerStorenosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *containerStore) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *containerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtcontainerStorebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtcontainerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtcontainerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtcontainerStorenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index 2e7f1e65..aa0da7ad 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -25,6 +25,7 @@ package aufs import ( "bufio" "fmt" + "io" "io/ioutil" "os" "os/exec" @@ -32,22 +33,22 @@ import ( "path/filepath" "strings" "sync" - "syscall" - - "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" + "time" "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/locker" mountpk "github.com/containers/storage/pkg/mount" - "github.com/containers/storage/pkg/stringid" - + "github.com/containers/storage/pkg/system" rsystem "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + "golang.org/x/sys/unix" ) var ( @@ -74,6 +75,8 @@ type Driver struct { ctr *graphdriver.RefCounter pathCacheLock sync.Mutex pathCache map[string]string + naiveDiff graphdriver.DiffDriver + locker *locker.Locker } // Init returns a new AUFS driver. 
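// [Editor's note: illustrative sketch, not part of the vendored diff.]
// The two fields added to Driver above drive most of the aufs changes that
// follow: `locker` serializes operations on a single layer id, and
// `naiveDiff` is a fallback DiffDriver used whenever a diff is requested
// against something other than a layer's direct parent. The per-id locking
// pattern, assuming the vendored github.com/containers/storage/pkg/locker
// API used in this file (locker.New(), Lock(name), Unlock(name)), looks like
// this in isolation (withLayer is a hypothetical helper):
//
//	func (a *Driver) withLayer(id string, fn func() error) error {
//		a.locker.Lock(id)         // blocks only callers working on this id
//		defer a.locker.Unlock(id) // unrelated ids proceed concurrently
//		return fn()
//	}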
@@ -83,6 +86,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // Try to load the aufs kernel module if err := supportsAufs(); err != nil { return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs") + } fsMagic, err := graphdriver.GetFSMagic(root) @@ -111,6 +115,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap gidMaps: gidMaps, pathCache: make(map[string]string), ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + locker: locker.New(), } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) @@ -137,6 +142,32 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } } + logger := logrus.WithFields(logrus.Fields{ + "module": "graphdriver", + "driver": "aufs", + }) + + for _, path := range []string{"mnt", "diff"} { + p := filepath.Join(root, path) + entries, err := ioutil.ReadDir(p) + if err != nil { + logger.WithError(err).WithField("dir", p).Error("error reading dir entries") + continue + } + for _, entry := range entries { + if !entry.IsDir() { + continue + } + if strings.HasSuffix(entry.Name(), "-removing") { + logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir") + if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil { + logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir") + } + } + } + } + + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) return a, nil } @@ -200,17 +231,22 @@ func (a *Driver) Exists(id string) bool { return true } +// AdditionalImageStores returns additional image stores supported by the driver +func (a *Driver) AdditionalImageStores() []string { + return nil +} + // CreateReadWrite creates a layer that is writable for use as a container // file system. -func (a *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return a.Create(id, parent, mountLabel, storageOpt) +func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return a.Create(id, parent, opts) } // Create three folders for each id // mnt, layers, and diff -func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { +func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - if len(storageOpt) != 0 { + if opts != nil && len(opts.StorageOpt) != 0 { return fmt.Errorf("--storage-opt is not supported for aufs") } @@ -225,7 +261,7 @@ func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str defer f.Close() if parent != "" { - ids, err := getParentIds(a.rootPath(), parent) + ids, err := getParentIDs(a.rootPath(), parent) if err != nil { return err } @@ -268,35 +304,68 @@ func (a *Driver) createDirsFor(id string) error { // Remove will unmount and remove the given id. 
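// [Editor's note: illustrative sketch, not part of the vendored diff.]
// Remove below now retries the unmount a bounded number of times, treating
// EBUSY as a possibly transient condition, and then delegates deletion to
// the new atomicRemove helper: rename the directory out of the way first so
// nothing else resolves the path mid-delete, then remove the renamed tree.
// The retry shape, reduced to its essentials:
//
//	var err error
//	for i := 0; i < 5; i++ {
//		if err = unix.Unmount(dir, 0); err != unix.EBUSY {
//			break // success, or an error that retrying will not fix
//		}
//		time.Sleep(100 * time.Millisecond) // busy: back off and try again
//	}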
func (a *Driver) Remove(id string) error {
+	a.locker.Lock(id)
+	defer a.locker.Unlock(id)
 	a.pathCacheLock.Lock()
 	mountpoint, exists := a.pathCache[id]
 	a.pathCacheLock.Unlock()
 	if !exists {
 		mountpoint = a.getMountpoint(id)
 	}
-	if err := a.unmount(mountpoint); err != nil {
-		// no need to return here, we can still try to remove since the `Rename` will fail below if still mounted
-		logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err)
-	}
-	// Atomically remove each directory in turn by first moving it out of the
-	// way (so that container runtimes don't find it anymore) before doing removal of
-	// the whole tree.
-	tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
-	if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	defer os.RemoveAll(tmpMntPath)
+	logger := logrus.WithFields(logrus.Fields{
+		"module": "graphdriver",
+		"driver": "aufs",
+		"layer":  id,
+	})

-	tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
-	if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
-		return err
+	var retries int
+	for {
+		mounted, err := a.mounted(mountpoint)
+		if err != nil {
+			if os.IsNotExist(err) {
+				break
+			}
+			return err
+		}
+		if !mounted {
+			break
+		}
+
+		err = a.unmount(mountpoint)
+		if err == nil {
+			break
+		}
+
+		if err != unix.EBUSY {
+			return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint)
+		}
+		if retries >= 5 {
+			return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint)
+		}
+		// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
+		retries++
+		logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries)
+		time.Sleep(100 * time.Millisecond)
 	}
-	defer os.RemoveAll(tmpDiffpath)

 	// Remove the layers file for the id
 	if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
-		return err
+		return errors.Wrapf(err, "error removing layers dir for %s", id)
+	}
+
+	if err := atomicRemove(a.getDiffPath(id)); err != nil {
+		return errors.Wrapf(err, "could not remove diff path for id %s", id)
+	}
+
+	// Atomically remove each directory in turn by first moving it out of the
+	// way (so that container runtime doesn't find it anymore) before doing removal of
+	// the whole tree.
+	if err := atomicRemove(mountpoint); err != nil {
+		if errors.Cause(err) == unix.EBUSY {
+			logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY")
+		}
+		return errors.Wrapf(err, "could not remove mountpoint for id %s", id)
 	}

 	a.pathCacheLock.Lock()
@@ -305,9 +374,29 @@ func (a *Driver) Remove(id string) error {
 	return nil
 }

+func atomicRemove(source string) error {
+	target := source + "-removing"
+
+	err := os.Rename(source, target)
+	switch {
+	case err == nil, os.IsNotExist(err):
+	case os.IsExist(err):
+		// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
+		if _, e := os.Stat(source); !os.IsNotExist(e) {
+			return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target)
+		}
+	default:
+		return errors.Wrapf(err, "error preparing atomic delete")
+	}
+
+	return system.EnsureRemoveAll(target)
+}
+
 // Get returns the rootfs path for the id.
-// This will mount the dir at it's given path +// This will mount the dir at its given path func (a *Driver) Get(id, mountLabel string) (string, error) { + a.locker.Lock(id) + defer a.locker.Unlock(id) parents, err := a.getParentLayerPaths(id) if err != nil && !os.IsNotExist(err) { return "", err @@ -343,6 +432,8 @@ func (a *Driver) Get(id, mountLabel string) (string, error) { // Put unmounts and updates list of active mounts. func (a *Driver) Put(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) a.pathCacheLock.Lock() m, exists := a.pathCache[id] if !exists { @@ -361,9 +452,22 @@ func (a *Driver) Put(id string) error { return err } +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (a *Driver) isParent(id, parent string) bool { + parents, _ := getParentIDs(a.rootPath(), id) + if parent == "" && len(parents) > 0 { + return false + } + return !(len(parents) > 0 && parent != parents[0]) +} + // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". -func (a *Driver) Diff(id, parent string) (archive.Archive, error) { +func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Diff(id, parent) + } + // AUFS doesn't need the parent layer to produce a diff. return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ Compression: archive.Uncompressed, @@ -373,12 +477,6 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) { }) } -// AdditionalImageStores returns additional image stores supported by the driver -func (a *Driver) AdditionalImageStores() []string { - var imageStores []string - return imageStores -} - type fileGetNilCloser struct { storage.FileGetter } @@ -394,7 +492,7 @@ func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil } -func (a *Driver) applyDiff(id string, diff archive.Reader) error { +func (a *Driver) applyDiff(id string, diff io.Reader) error { return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ UIDMaps: a.uidMaps, GIDMaps: a.gidMaps, @@ -405,6 +503,9 @@ func (a *Driver) applyDiff(id string, diff archive.Reader) error { // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (a *Driver) DiffSize(id, parent string) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.DiffSize(id, parent) + } // AUFS doesn't need the parent layer to calculate the diff size. return directory.Size(path.Join(a.rootPath(), "diff", id)) } @@ -412,8 +513,12 @@ func (a *Driver) DiffSize(id, parent string) (size int64, err error) { // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. -func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { - // AUFS doesn't need the parent id to apply the diff. +func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.ApplyDiff(id, parent, diff) + } + + // AUFS doesn't need the parent id to apply the diff if it is the direct parent. 
if err = a.applyDiff(id, diff); err != nil { return } @@ -424,6 +529,10 @@ func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Changes(id, parent) + } + // AUFS doesn't have snapshots, so we need to get changes from all parent // layers. layers, err := a.getParentLayerPaths(id) @@ -434,7 +543,7 @@ func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { } func (a *Driver) getParentLayerPaths(id string) ([]string, error) { - parentIds, err := getParentIds(a.rootPath(), id) + parentIds, err := getParentIDs(a.rootPath(), id) if err != nil { return nil, err } @@ -499,7 +608,7 @@ func (a *Driver) Cleanup() error { for _, m := range dirs { if err := a.unmount(m); err != nil { - logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err) + logrus.Debugf("aufs error unmounting %s: %s", m, err) } } return mountpk.Unmount(a.root) @@ -517,46 +626,35 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro offset := 54 if useDirperm() { - offset += len("dirperm1") + offset += len(",dirperm1") } - b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) - firstMount := true - i := 0 - - for { - for ; i < len(ro); i++ { - layer := fmt.Sprintf(":%s=ro+wh", ro[i]) - - if firstMount { - if bp+len(layer) > len(b) { - break - } - bp += copy(b[bp:], layer) - } else { - data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) - if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { - return - } - } - } - - if firstMount { - opts := "dio,xino=/dev/shm/aufs.xino" - if useDirperm() { - opts += ",dirperm1" - } - data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) - if err = mount("none", target, "aufs", 0, data); err != nil { - return - } - firstMount = false - } - - if i == len(ro) { + index := 0 + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + if bp+len(layer) > len(b) { break } + bp += copy(b[bp:], layer) + } + + opts := "dio,xino=/dev/shm/aufs.xino" + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil { + return + } } return diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go index eb298d9e..d2325fc4 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/dirs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go @@ -29,7 +29,7 @@ func loadIds(root string) ([]string, error) { // // If there are no lines in the file then the id has no parent // and an empty slice is returned. 
-func getParentIds(root, id string) ([]string, error) { +func getParentIDs(root, id string) ([]string, error) { f, err := os.Open(path.Join(root, "layers", id)) if err != nil { return nil, err diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go index 8314f142..100e7537 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/mount.go +++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go @@ -4,9 +4,9 @@ package aufs import ( "os/exec" - "syscall" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) // Unmount the target specified. @@ -14,7 +14,7 @@ func Unmount(target string) error { if err := exec.Command("auplink", target, "flush").Run(); err != nil { logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) } - if err := syscall.Unmount(target, 0); err != nil { + if err := unix.Unmount(target, 0); err != nil { return err } return nil diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go index 8062bae4..937104ba 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go @@ -1,7 +1,7 @@ package aufs -import "syscall" +import "golang.org/x/sys/unix" func mount(source string, target string, fstype string, flags uintptr, data string) error { - return syscall.Mount(source, target, fstype, flags, data) + return unix.Mount(source, target, fstype, flags, data) } diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go index c807902d..d030b066 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go @@ -2,7 +2,7 @@ package aufs -import "github.com/pkg/errors" +import "errors" // MsRemount declared to specify a non-linux system mount. 
const MsRemount = 0 diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index 9e16f894..abc856c8 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -16,31 +16,32 @@ import "C" import ( "fmt" + "io/ioutil" + "math" "os" "path" "path/filepath" + "strconv" "strings" - "syscall" + "sync" "unsafe" "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/system" "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func init() { graphdriver.Register("btrfs", Init) } -var ( - quotaEnabled = false - userDiskQuota = false -) - type btrfsOptions struct { minSpace uint64 size uint64 @@ -71,18 +72,11 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } - opt, err := parseOptions(options) + opt, userDiskQuota, err := parseOptions(options) if err != nil { return nil, err } - if userDiskQuota { - if err := subvolEnableQuota(home); err != nil { - return nil, err - } - quotaEnabled = true - } - driver := &Driver{ home: home, uidMaps: uidMaps, @@ -90,39 +84,48 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap options: opt, } + if userDiskQuota { + if err := driver.subvolEnableQuota(); err != nil { + return nil, err + } + } + return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil } -func parseOptions(opt []string) (btrfsOptions, error) { +func parseOptions(opt []string) (btrfsOptions, bool, error) { var options btrfsOptions + userDiskQuota := false for _, option := range opt { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { - return options, err + return options, userDiskQuota, err } key = strings.ToLower(key) switch key { case "btrfs.min_space": minSpace, err := units.RAMInBytes(val) if err != nil { - return options, err + return options, userDiskQuota, err } userDiskQuota = true options.minSpace = uint64(minSpace) default: - return options, fmt.Errorf("Unknown option %s", key) + return options, userDiskQuota, fmt.Errorf("Unknown option %s", key) } } - return options, nil + return options, userDiskQuota, nil } // Driver contains information about the filesystem mounted. type Driver struct { //root of the file system - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - options btrfsOptions + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + options btrfsOptions + quotaEnabled bool + once sync.Once } // String prints the name of the driver (btrfs). @@ -151,10 +154,8 @@ func (d *Driver) Metadata(id string) (map[string]string, error) { // Cleanup unmounts the home directory. 
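// [Editor's note: illustrative sketch, not part of the vendored diff.]
// Cleanup below, like the other quota entry points, now goes through
// per-Driver state: parseOptions reports the user's quota request back to
// the caller instead of flipping package globals, and quotaEnabled is probed
// lazily via sync.Once before each enable/disable/rescan. A hypothetical
// caller of the new parseOptions signature, using the one key it accepts:
//
//	opt, userDiskQuota, err := parseOptions([]string{"btrfs.min_space=10G"})
//	if err != nil {
//		return err
//	}
//	// userDiskQuota == true; opt.minSpace == 10 * 1024 * 1024 * 1024,
//	// since go-units RAMInBytes parses "G" as a binary (GiB) unit.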
func (d *Driver) Cleanup() error { - if quotaEnabled { - if err := subvolDisableQuota(d.home); err != nil { - return err - } + if err := d.subvolDisableQuota(); err != nil { + return err } return mount.Unmount(d.home) @@ -197,7 +198,7 @@ func subvolCreate(path, name string) error { args.name[i] = C.char(c) } - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) @@ -225,7 +226,7 @@ func subvolSnapshot(src, dest, name string) error { C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) C.free(unsafe.Pointer(cs)) - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) @@ -234,8 +235,8 @@ func subvolSnapshot(src, dest, name string) error { } func isSubvolume(p string) (bool, error) { - var bufStat syscall.Stat_t - if err := syscall.Lstat(p, &bufStat); err != nil { + var bufStat unix.Stat_t + if err := unix.Lstat(p, &bufStat); err != nil { return false, err } @@ -243,7 +244,7 @@ func isSubvolume(p string) (bool, error) { return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil } -func subvolDelete(dirpath, name string) error { +func subvolDelete(dirpath, name string, quotaEnabled bool) error { dir, err := openDir(dirpath) if err != nil { return err @@ -271,7 +272,7 @@ func subvolDelete(dirpath, name string) error { return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) } if sv { - if err := subvolDelete(path.Dir(p), f.Name()); err != nil { + if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) } } @@ -282,12 +283,27 @@ func subvolDelete(dirpath, name string) error { return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) } + if quotaEnabled { + if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { + var args C.struct_btrfs_ioctl_qgroup_create_args + args.qgroupid = C.__u64(qgroupid) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) + } + } else { + logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error()) + } + } + // all subvolumes have been removed // now remove the one originally passed in for i, c := range []byte(name) { args.name[i] = C.char(c) } - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) @@ -295,8 +311,27 @@ func subvolDelete(dirpath, name string) error { return nil } -func subvolEnableQuota(path string) error { - dir, err := openDir(path) +func (d *Driver) updateQuotaStatus() { + d.once.Do(func() { + if !d.quotaEnabled { + // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed + if err := 
subvolQgroupStatus(d.home); err != nil {
+				// quota is still not enabled
+				return
+			}
+			d.quotaEnabled = true
+		}
+	})
+}
+
+func (d *Driver) subvolEnableQuota() error {
+	d.updateQuotaStatus()
+
+	if d.quotaEnabled {
+		return nil
+	}
+
+	dir, err := openDir(d.home)
 	if err != nil {
 		return err
 	}
@@ -304,17 +339,25 @@
 	var args C.struct_btrfs_ioctl_quota_ctl_args
 	args.cmd = C.BTRFS_QUOTA_CTL_ENABLE
-	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
 		uintptr(unsafe.Pointer(&args)))
 	if errno != 0 {
 		return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error())
 	}
+
+	d.quotaEnabled = true
+
 	return nil
 }

-func subvolDisableQuota(path string) error {
-	dir, err := openDir(path)
+func (d *Driver) subvolDisableQuota() error {
+	d.updateQuotaStatus()
+
+	if !d.quotaEnabled {
+		return nil
+	}
+
+	dir, err := openDir(d.home)
 	if err != nil {
 		return err
 	}
@@ -322,24 +365,32 @@
 	var args C.struct_btrfs_ioctl_quota_ctl_args
 	args.cmd = C.BTRFS_QUOTA_CTL_DISABLE
-	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
 		uintptr(unsafe.Pointer(&args)))
 	if errno != 0 {
 		return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error())
 	}
+
+	d.quotaEnabled = false
+
 	return nil
 }

-func subvolRescanQuota(path string) error {
-	dir, err := openDir(path)
+func (d *Driver) subvolRescanQuota() error {
+	d.updateQuotaStatus()
+
+	if !d.quotaEnabled {
+		return nil
+	}
+
+	dir, err := openDir(d.home)
 	if err != nil {
 		return err
 	}
 	defer closeDir(dir)

 	var args C.struct_btrfs_ioctl_quota_rescan_args
-	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
 		uintptr(unsafe.Pointer(&args)))
 	if errno != 0 {
 		return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error())
@@ -358,7 +409,7 @@
 	var args C.struct_btrfs_ioctl_qgroup_limit_args
 	args.lim.max_referenced = C.__u64(size)
 	args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
-	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
 		uintptr(unsafe.Pointer(&args)))
 	if errno != 0 {
 		return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
@@ -367,6 +418,60 @@
 	return nil
 }

+// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path
+// with search key of BTRFS_QGROUP_STATUS_KEY.
+// In case qgroup is enabled, the returned key type will match BTRFS_QGROUP_STATUS_KEY.
+// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 +func subvolQgroupStatus(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_search_args + args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID + args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_objectid = C.__u64(math.MaxUint64) + args.key.max_offset = C.__u64(math.MaxUint64) + args.key.max_transid = C.__u64(math.MaxUint64) + args.key.nr_items = 4096 + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) + } + sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) + if sh._type != C.BTRFS_QGROUP_STATUS_KEY { + return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) + } + return nil +} + +func subvolLookupQgroup(path string) (uint64, error) { + dir, err := openDir(path) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_ino_lookup_args + args.objectid = C.BTRFS_FIRST_FREE_OBJECTID + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) + } + if args.treeid == 0 { + return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) + } + + return uint64(args.treeid), nil +} + func (d *Driver) subvolumesDir() string { return path.Join(d.home, "subvolumes") } @@ -375,14 +480,23 @@ func (d *Driver) subvolumesDirID(id string) string { return path.Join(d.subvolumesDir(), id) } +func (d *Driver) quotasDir() string { + return path.Join(d.home, "quotas") +} + +func (d *Driver) quotasDirID(id string) string { + return path.Join(d.quotasDir(), id) +} + // CreateReadWrite creates a layer that is writable for use as a container // file system. -func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.Create(id, parent, mountLabel, storageOpt) +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) } // Create the filesystem with given id. 
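// [Editor's note: illustrative sketch, not part of the vendored diff.]
// The Create/CreateReadWrite signatures above collapse the old mountLabel
// and storageOpt arguments into a single *graphdriver.CreateOpts, whose
// MountLabel and StorageOpt fields are what the body below reads. A
// hypothetical caller under the new API:
//
//	opts := &graphdriver.CreateOpts{
//		MountLabel: mountLabel,
//		StorageOpt: map[string]string{"size": "20G"},
//	}
//	if err := driver.CreateReadWrite(layerID, parentID, opts); err != nil {
//		return err
//	}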
-func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + quotas := path.Join(d.home, "quotas") subvolumes := path.Join(d.home, "subvolumes") rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { @@ -409,14 +523,26 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str } } + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + if _, ok := storageOpt["size"]; ok { driver := &Driver{} if err := d.parseStorageOpt(storageOpt, driver); err != nil { return err } + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { return err } + if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + return err + } } // if we have a remapped root (user namespaces enabled), change the created snapshot @@ -427,6 +553,11 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str } } + mountLabel := "" + if opts != nil { + mountLabel = opts.MountLabel + } + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) } @@ -459,11 +590,8 @@ func (d *Driver) setStorageSize(dir string, driver *Driver) error { return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) } - if !quotaEnabled { - if err := subvolEnableQuota(d.home); err != nil { - return err - } - quotaEnabled = true + if err := d.subvolEnableQuota(); err != nil { + return err } if err := subvolLimitQgroup(dir, driver.options.size); err != nil { @@ -479,13 +607,25 @@ func (d *Driver) Remove(id string) error { if _, err := os.Stat(dir); err != nil { return err } - if err := subvolDelete(d.subvolumesDir(), id); err != nil { + quotasDir := d.quotasDirID(id) + if _, err := os.Stat(quotasDir); err == nil { + if err := os.Remove(quotasDir); err != nil { + return err + } + } else if !os.IsNotExist(err) { return err } - if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + + // Call updateQuotaStatus() to invoke status update + d.updateQuotaStatus() + + if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { return err } - if err := subvolRescanQuota(d.home); err != nil { + if err := system.EnsureRemoveAll(dir); err != nil { + return err + } + if err := d.subvolRescanQuota(); err != nil { return err } return nil @@ -503,6 +643,17 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { return "", fmt.Errorf("%s: not a directory", dir) } + if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil { + if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { + if err := d.subvolEnableQuota(); err != nil { + return "", err + } + if err := subvolLimitQgroup(dir, size); err != nil { + return "", err + } + } + } + return dir, nil } @@ -522,6 +673,5 @@ func (d *Driver) Exists(id string) bool { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - var imageStores []string - return imageStores + return nil } diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go index 5ea604f5..72551a38 100644 --- a/vendor/github.com/containers/storage/drivers/counter.go +++ 
b/vendor/github.com/containers/storage/drivers/counter.go @@ -22,30 +22,21 @@ func NewRefCounter(c Checker) *RefCounter { } } -// Increment increaes the ref count for the given id and returns the current count +// Increment increases the ref count for the given id and returns the current count func (c *RefCounter) Increment(path string) int { - c.mu.Lock() - m := c.counts[path] - if m == nil { - m = &minfo{} - c.counts[path] = m - } - // if we are checking this path for the first time check to make sure - // if it was already mounted on the system and make sure we have a correct ref - // count if it is mounted as it is in use. - if !m.check { - m.check = true - if c.checker.IsMounted(path) { - m.count++ - } - } - m.count++ - c.mu.Unlock() - return m.count + return c.incdec(path, func(minfo *minfo) { + minfo.count++ + }) } // Decrement decreases the ref count for the given id and returns the current count func (c *RefCounter) Decrement(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count-- + }) +} + +func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { c.mu.Lock() m := c.counts[path] if m == nil { @@ -61,7 +52,8 @@ func (c *RefCounter) Decrement(path string) int { m.count++ } } - m.count-- + infoOp(m) + count := m.count c.mu.Unlock() - return m.count + return count } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go new file mode 100644 index 00000000..1430c885 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go @@ -0,0 +1,236 @@ +package devmapper + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type directLVMConfig struct { + Device string + ThinpPercent uint64 + ThinpMetaPercent uint64 + AutoExtendPercent uint64 + AutoExtendThreshold uint64 +} + +var ( + errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") + errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") + errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm") +) + +func validateLVMConfig(cfg directLVMConfig) error { + if reflect.DeepEqual(cfg, directLVMConfig{}) { + return nil + } + if cfg.Device == "" { + return errMissingSetupDevice + } + if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { + return errThinpPercentMissing + } + + if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { + return errThinpPercentTooBig + } + return nil +} + +func checkDevAvailable(dev string) error { + lvmScan, err := exec.LookPath("lvmdiskscan") + if err != nil { + logrus.Debug("could not find lvmdiskscan") + return nil + } + + out, err := exec.Command(lvmScan).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + if !bytes.Contains(out, []byte(dev)) { + return errors.Errorf("%s is not available for use with devicemapper", dev) + } + return nil +} + +func checkDevInVG(dev string) error { + pvDisplay, err := exec.LookPath("pvdisplay") + if err != nil { + logrus.Debug("could not find pvdisplay") + return nil + } + + out, err := exec.Command(pvDisplay, dev).CombinedOutput() + if err != nil { + 
logrus.WithError(err).Error(string(out)) + return nil + } + + scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) + for scanner.Scan() { + fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") + if len(fields) > 1 { + // got "VG Name" line" + vg := strings.TrimSpace(fields[1]) + if len(vg) > 0 { + return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) + } + logrus.Error(fields) + break + } + } + return nil +} + +func checkDevHasFS(dev string) error { + blkid, err := exec.LookPath("blkid") + if err != nil { + logrus.Debug("could not find blkid") + return nil + } + + out, err := exec.Command(blkid, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + fields := bytes.Fields(out) + for _, f := range fields { + kv := bytes.Split(f, []byte{'='}) + if bytes.Equal(kv[0], []byte("TYPE")) { + v := bytes.Trim(kv[1], "\"") + if len(v) > 0 { + return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) + } + return nil + } + } + return nil +} + +func verifyBlockDevice(dev string, force bool) error { + if err := checkDevAvailable(dev); err != nil { + return err + } + if err := checkDevInVG(dev); err != nil { + return err + } + + if force { + return nil + } + + if err := checkDevHasFS(dev); err != nil { + return err + } + return nil +} + +func readLVMConfig(root string) (directLVMConfig, error) { + var cfg directLVMConfig + + p := filepath.Join(root, "setup-config.json") + b, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return cfg, nil + } + return cfg, errors.Wrap(err, "error reading existing setup config") + } + + // check if this is just an empty file, no need to produce a json error later if so + if len(b) == 0 { + return cfg, nil + } + + err = json.Unmarshal(b, &cfg) + return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") +} + +func writeLVMConfig(root string, cfg directLVMConfig) error { + p := filepath.Join(root, "setup-config.json") + b, err := json.Marshal(cfg) + if err != nil { + return errors.Wrap(err, "error marshalling direct lvm config") + } + err = ioutil.WriteFile(p, b, 0600) + return errors.Wrap(err, "error writing direct lvm config to file") +} + +func setupDirectLVM(cfg directLVMConfig) error { + lvmProfileDir := "/etc/lvm/profile" + binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"} + + for _, bin := range binaries { + if _, err := exec.LookPath(bin); err != nil { + return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm") + } + } + + err := os.MkdirAll(lvmProfileDir, 0755) + if err != nil { + return errors.Wrap(err, "error creating lvm profile directory") + } + + if cfg.AutoExtendPercent == 0 { + cfg.AutoExtendPercent = 20 + } + + if cfg.AutoExtendThreshold == 0 { + cfg.AutoExtendThreshold = 80 + } + + if cfg.ThinpPercent == 0 { + cfg.ThinpPercent = 95 + } + if cfg.ThinpMetaPercent == 0 { + cfg.ThinpMetaPercent = 1 + } + + out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", 
fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) + err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) + if err != nil { + return errors.Wrap(err, "error writing storage thinp autoextend profile") + } + + out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput() + return errors.Wrap(err, string(out)) +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index 2608c49f..6db7b2b2 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -12,44 +12,41 @@ import ( "os/exec" "path" "path/filepath" + "reflect" "strconv" "strings" "sync" - "syscall" "time" - "github.com/sirupsen/logrus" - "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/devicemapper" + "github.com/containers/storage/pkg/dmesg" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/loopback" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" - "github.com/docker/go-units" - + "github.com/containers/storage/pkg/parsers/kernel" + units "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) var ( - defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 - defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 - defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors - defaultUdevSyncOverride = false - maxDeviceID = 0xffffff // 24 bit, pool limit - deviceIDMapSz = (maxDeviceID + 1) / 8 - // We retry device removal so many a times that even error messages - // will fill up console during normal operation. So only log Fatal - // messages by default. 
- logLevel = devicemapper.LogLevelFatal + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 driverDeferredRemovalSupport = false enableDeferredRemoval = false enableDeferredDeletion = false userBaseSize = false defaultMinFreeSpacePercent uint32 = 10 + lvmSetupConfigForce bool ) const deviceSetMetaFile string = "deviceset-metadata" @@ -122,6 +119,8 @@ type DeviceSet struct { uidMaps []idtools.IDMap gidMaps []idtools.IDMap minFreeSpacePercent uint32 //min free space percentage in thinpool + xfsNospaceRetries string // max retries when xfs receives ENOSPC + lvmSetupConfig directLVMConfig } // DiskUsage contains information about disk usage and is used when reporting Status of a device. @@ -170,7 +169,7 @@ type Status struct { MinFreeSpace uint64 } -// Structure used to export image/container metadata in docker inspect. +// Structure used to export image/container metadata in inspect. type deviceMetadata struct { deviceID int deviceSize uint64 // size in bytes @@ -379,10 +378,7 @@ func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { var mask byte i := deviceID % 8 mask = (1 << uint(i)) - if (devices.deviceIDMap[deviceID/8] & mask) != 0 { - return false - } - return true + return (devices.deviceIDMap[deviceID/8] & mask) == 0 } // Should be called with devices.Lock() held. @@ -409,8 +405,8 @@ func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { // This function relies on that device hash map has been loaded in advance. // Should be called with devices.Lock() held. func (devices *DeviceSet) constructDeviceIDMap() { - logrus.Debugf("devmapper: constructDeviceIDMap()") - defer logrus.Debugf("devmapper: constructDeviceIDMap() END") + logrus.Debug("devmapper: constructDeviceIDMap()") + defer logrus.Debug("devmapper: constructDeviceIDMap() END") for _, info := range devices.Devices { devices.markDeviceIDUsed(info.DeviceID) @@ -458,8 +454,8 @@ func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) } func (devices *DeviceSet) loadDeviceFilesOnStart() error { - logrus.Debugf("devmapper: loadDeviceFilesOnStart()") - defer logrus.Debugf("devmapper: loadDeviceFilesOnStart() END") + logrus.Debug("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") var scan = func(path string, info os.FileInfo, err error) error { if err != nil { @@ -479,11 +475,10 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error { } // Should be called with devices.Lock() held. -func (devices *DeviceSet) unregisterDevice(id int, hash string) error { - logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) +func (devices *DeviceSet) unregisterDevice(hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v)", hash) info := &devInfo{ - Hash: hash, - DeviceID: id, + Hash: hash, } delete(devices.Devices, hash) @@ -528,7 +523,7 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bo // Make sure deferred removal on device is canceled, if one was // scheduled. 
- if err := devices.cancelDeferredRemoval(info); err != nil { + if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) } @@ -539,11 +534,11 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bo return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) } -// Return true only if kernel supports xfs and mkfs.xfs is available -func xfsSupported() bool { +// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error +func xfsSupported() error { // Make sure mkfs.xfs is available if _, err := exec.LookPath("mkfs.xfs"); err != nil { - return false + return err // error text is descriptive enough } // Check if kernel supports xfs filesystem or not. @@ -551,43 +546,48 @@ func xfsSupported() bool { f, err := os.Open("/proc/filesystems") if err != nil { - logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) - return false + return errors.Wrapf(err, "error checking for xfs support") } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), "\txfs") { - return true + return nil } } if err := s.Err(); err != nil { - logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return errors.Wrapf(err, "error checking for xfs support") } - return false + + return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) } func determineDefaultFS() string { - if xfsSupported() { + err := xfsSupported() + if err == nil { return "xfs" } - logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem") + logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err) return "ext4" } +// mkfsOptions tries to figure out whether some additional mkfs options are required +func mkfsOptions(fs string) []string { + if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) { + // For kernels earlier than 3.16 (and newer xfsutils), + // some xfs features need to be explicitly disabled. + return []string{"-m", "crc=0,finobt=0"} + } + + return []string{} +} + func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { devname := info.DevName() - args := []string{} - for _, arg := range devices.mkfsArgs { - args = append(args, arg) - } - - args = append(args, devname) - if devices.filesystem == "" { devices.filesystem = determineDefaultFS() } @@ -595,7 +595,11 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { return err } - logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + args := mkfsOptions(devices.filesystem) + args = append(args, devices.mkfsArgs...) 
+ args = append(args, devname) + + logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) defer func() { if err != nil { logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) @@ -833,7 +837,7 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { } if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(deviceID, hash) + devices.unregisterDevice(hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devices.markDeviceIDFree(deviceID) return nil, err @@ -841,11 +845,57 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { return info, nil } -func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { - if err := devices.poolHasFreeSpace(); err != nil { +func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { + var ( + devinfo *devicemapper.Info + err error + ) + + if err = devices.poolHasFreeSpace(); err != nil { return err } + if devices.deferredRemove { + devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) + if err != nil { + return err + } + if devinfo != nil && devinfo.DeferredRemove != 0 { + err = devices.cancelDeferredRemoval(baseInfo) + if err != nil { + // If the error is ErrEnxio, the device is probably already gone. Continue. + if errors.Cause(err) != devicemapper.ErrEnxio { + return err + } + devinfo = nil + } else { + defer devices.deactivateDevice(baseInfo) + } + } + } else { + devinfo, err = devicemapper.GetInfo(baseInfo.Name()) + if err != nil { + return err + } + } + + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { + return err + } + defer devicemapper.ResumeDevice(baseInfo.Name()) + } + + if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { deviceID, err := devices.getNextFreeDeviceID() if err != nil { return err } @@ -858,7 +908,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf } for { - if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil { + if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { if devicemapper.DeviceIDExists(err) { // Device ID already exists. This should not // happen.
Now we have a mechanism to find @@ -888,7 +938,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf } if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(deviceID, hash) + devices.unregisterDevice(hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devices.markDeviceIDFree(deviceID) return err @@ -1134,7 +1184,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error { defer devices.deactivateDevice(info) - fsMountPoint := "/run/containers/mnt" + fsMountPoint := "/run/containers/storage/mnt" if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { if err := os.MkdirAll(fsMountPoint, 0700); err != nil { return err @@ -1150,10 +1200,10 @@ func (devices *DeviceSet) growFS(info *devInfo) error { options = joinMountOptions(options, devices.mountOptions) if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) + return fmt.Errorf("Error mounting '%s' on '%s': %s\n%v", info.DevName(), fsMountPoint, err, string(dmesg.Dmesg(256))) } - defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) + defer unix.Unmount(fsMountPoint, unix.MNT_DETACH) switch devices.BaseDeviceFilesystem { case "ext4": @@ -1216,39 +1266,18 @@ func (devices *DeviceSet) setupBaseImage() error { } func setCloseOnExec(name string) { - if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { - for _, i := range fileInfos { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) - if link == name { - fd, err := strconv.Atoi(i.Name()) - if err == nil { - syscall.CloseOnExec(fd) - } + fileInfos, _ := ioutil.ReadDir("/proc/self/fd") + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + unix.CloseOnExec(fd) } } } } -// DMLog implements logging using DevMapperLogger interface. -func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { - // By default libdm sends us all the messages including debug ones. - // We need to filter out messages here and figure out which one - // should be printed. - if level > logLevel { - return - } - - // FIXME(vbatts) push this back into ./pkg/devicemapper/ - if level <= devicemapper.LogLevelErr { - logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } else if level <= devicemapper.LogLevelInfo { - logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } else { - // FIXME(vbatts) push this back into ./pkg/devicemapper/ - logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } -} - func major(device uint64) uint64 { return (device >> 8) & 0xfff } @@ -1356,10 +1385,7 @@ func (devices *DeviceSet) saveTransactionMetaData() error { } func (devices *DeviceSet) removeTransactionMetaData() error { - if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { - return err - } - return nil + return os.RemoveAll(devices.transactionMetaFile()) } func (devices *DeviceSet) rollbackTransaction() error { @@ -1464,12 +1490,9 @@ func (devices *DeviceSet) closeTransaction() error { } func determineDriverCapabilities(version string) error { - /* - * Driver version 4.27.0 and greater support deferred activation - * feature. 
- */ + // Kernel driver version >= 4.27.0 supports deferred removal - logrus.Debugf("devicemapper: driver version is %s", version) + logrus.Debugf("devicemapper: kernel dm driver version is %s", version) versionSplit := strings.Split(version, ".") major, err := strconv.Atoi(versionSplit[0]) @@ -1505,12 +1528,13 @@ func determineDriverCapabilities(version string) error { // Determine the major and minor number of loopback device func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { - stat, err := file.Stat() + var stat unix.Stat_t + err := unix.Stat(file.Name(), &stat) if err != nil { return 0, 0, err } - dev := stat.Sys().(*syscall.Stat_t).Rdev + dev := stat.Rdev majorNum := major(dev) minorNum := minor(dev) @@ -1648,36 +1672,19 @@ func (devices *DeviceSet) enableDeferredRemovalDeletion() error { return nil } -func (devices *DeviceSet) initDevmapper(doInit bool) error { - // give ourselves to libdm as a log handler - devicemapper.LogInit(devices) - - version, err := devicemapper.GetDriverVersion() - if err != nil { - // Can't even get driver version, assume not supported - return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine version of device mapper") - } - - if err := determineDriverCapabilities(version); err != nil { - return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine device mapper driver capabilities") - } - +func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { if err := devices.enableDeferredRemovalDeletion(); err != nil { return err } // https://github.com/docker/docker/issues/4036 - // if supported := devicemapper.UdevSetSyncSupport(true); !supported { - // if storageversion.IAmStatic == "true" { - // logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") - // } else { - // logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") - // } - // - // if !devices.overrideUdevSyncCheck { - // return graphdriver.ErrNotSupported - // } - // } + if supported := devicemapper.UdevSetSyncSupport(true); !supported { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver.
For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") + + if !devices.overrideUdevSyncCheck { + return graphdriver.ErrNotSupported + } + } //create the root dir of the devmapper driver ownership to match this //daemon's remapped root uid/gid so containers can start properly @@ -1692,20 +1699,47 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { return err } - // Set the device prefix from the device id and inode of the container root dir - - st, err := os.Stat(devices.root) + prevSetupConfig, err := readLVMConfig(devices.root) if err != nil { + return err + } + + if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { + if devices.thinPoolDevice != "" { + return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") + } + + if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { + if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { + return errors.New("changing direct-lvm config is not supported") + } + logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") + if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { + return err + } + if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { + return err + } + if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { + return err + } + } + devices.thinPoolDevice = "storage-thinpool" + logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) + } + + // Set the device prefix from the device id and inode of the storage root dir + var st unix.Stat_t + if err := unix.Stat(devices.root, &st); err != nil { return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) } - sysSt := st.Sys().(*syscall.Stat_t) // "reg-" stands for "regular file". // In the future we might use "dev-" for "device file", etc. // container-maj,min[-inode] stands for: // - Managed by container storage // - The target of this device is at major <maj> and minor <min> // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
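// Editorial example with made-up numbers: a storage root that lives on device
// 253:2 and has inode 131077 yields the prefix "container-253:2-131077".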
- devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) + devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino) logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) // Check for the existence of the thin-pool device @@ -1748,7 +1782,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { hasData := devices.hasImage("data") if !doInit && !hasData { - return errors.New("Loopback data file not found") + return errors.New("loopback data file not found") } if !hasData { @@ -1781,7 +1815,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { hasMetadata := devices.hasImage("metadata") if !doInit && !hasMetadata { - return errors.New("Loopback metadata file not found") + return errors.New("loopback metadata file not found") } if !hasMetadata { @@ -1811,6 +1845,14 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { return err } + defer func() { + if retErr != nil { + err = devices.deactivatePool() + if err != nil { + logrus.Warnf("devmapper: Failed to deactivatePool: %v", err) + } + } + }() } // Pool already exists and caller did not pass us a pool. That means @@ -1857,8 +1899,8 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // AddDevice adds a device and registers in the hash. func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { - logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s)", hash, baseHash) - defer logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s) END", hash, baseHash) + logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) // If a deleted device exists, return error. baseInfo, err := devices.lookupDeviceWithLock(baseHash) @@ -1895,7 +1937,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) } - if err := devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { return err } @@ -1975,7 +2017,7 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro } if err == nil { - if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil { + if err := devices.unregisterDevice(info.Hash); err != nil { return err } // If device was already in deferred delete state that means @@ -1996,8 +2038,8 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro // Issue discard only if device open count is zero. func (devices *DeviceSet) issueDiscard(info *devInfo) error { - logrus.Debugf("devmapper: issueDiscard(device: %s). START", info.Hash) - defer logrus.Debugf("devmapper: issueDiscard(device: %s). END", info.Hash) + logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash) + defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash) // This is a workaround for the kernel not discarding block so // on the thin pool when we remove a thinp device, so we do it // manually. @@ -2030,7 +2072,16 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { } // Try to deactivate device in case it is active. 
- if err := devices.deactivateDevice(info); err != nil { + // If deferred removal is enabled and deferred deletion is disabled + // then make sure the device is removed synchronously. There have been + // some cases of the device being busy for a short duration, and we would + // rather busy-wait for device removal to take care of these cases. + deferredRemove := devices.deferredRemove + if !devices.deferredDelete { + deferredRemove = false + } + + if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil { logrus.Debugf("devmapper: Error deactivating device: %s", err) return err } @@ -2046,8 +2097,8 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { // removal. If one wants to override that and want DeleteDevice() to fail if // device was busy and could not be deleted, set syncDelete=true. func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { - logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) START", hash, syncDelete) - defer logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) END", hash, syncDelete) + logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete) + defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete) info, err := devices.lookupDeviceWithLock(hash) if err != nil { return err } @@ -2063,8 +2114,8 @@ } func (devices *DeviceSet) deactivatePool() error { - logrus.Debug("devmapper: deactivatePool()") - defer logrus.Debug("devmapper: deactivatePool END") + logrus.Debug("devmapper: deactivatePool() START") + defer logrus.Debug("devmapper: deactivatePool() END") devname := devices.getPoolDevName() devinfo, err := devicemapper.GetInfo(devname) @@ -2087,7 +2138,12 @@ } func (devices *DeviceSet) deactivateDevice(info *devInfo) error { - logrus.Debugf("devmapper: deactivateDevice(%s)", info.Hash) + return devices.deactivateDeviceMode(info, devices.deferredRemove) +} + +func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error { + var err error + logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash) defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) devinfo, err := devicemapper.GetInfo(info.Name()) @@ -2099,14 +2155,17 @@ return nil } - if devices.deferredRemove { - if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil { - return err - } + if deferredRemove { + err = devicemapper.RemoveDeviceDeferred(info.Name()) } else { - if err := devices.removeDevice(info.Name()); err != nil { - return err - } + err = devices.removeDevice(info.Name()) + } + + // This function's semantics are such that it does not return an + // error if the device does not exist. So if the device went away by + // the time we actually tried to remove it, do not return an error.
+ if errors.Cause(err) != devicemapper.ErrEnxio { + return err } return nil } @@ -2137,41 +2196,53 @@ func (devices *DeviceSet) removeDevice(devname string) error { return err } -func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { +func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { if !devices.deferredRemove { return nil } - logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) - defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) + logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name()) devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) + if err != nil { + return err + } if devinfo != nil && devinfo.DeferredRemove == 0 { return nil } // Cancel deferred remove - for i := 0; i < 100; i++ { - err = devicemapper.CancelDeferredRemove(info.Name()) - if err == nil { - break - } - - if errors.Cause(err) == devicemapper.ErrEnxio { - // Device is probably already gone. Return success. - return nil - } - + if err := devices.cancelDeferredRemoval(info); err != nil { + // If the error is ErrEnxio, the device is probably already gone. Continue. + if errors.Cause(err) != devicemapper.ErrEnxio { + return err + } + } + return nil +} - // If we see EBUSY it may be a transient error, - // sleep a bit a retry a few times. - devices.Unlock() - time.Sleep(100 * time.Millisecond) - devices.Lock() +func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { + logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) + + var err error + + // Cancel deferred remove + for i := 0; i < 100; i++ { + err = devicemapper.CancelDeferredRemove(info.Name()) + if err != nil { + if errors.Cause(err) == devicemapper.ErrBusy { + // If we see EBUSY it may be a transient error, + // sleep a bit and retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + continue + } + } + break } return err } @@ -2209,9 +2280,6 @@ func (devices *DeviceSet) Shutdown(home string) error { if err != nil { return err } - if p == path.Join(home, "mnt") { - return nil - } if !info.IsDir() { return nil } @@ -2220,7 +2288,7 @@ // We use MNT_DETACH here in case it is still busy in some running // container. This means it'll go away from the global scope directly, // and the device will be released when that container dies. - if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil { + if err := unix.Unmount(p, unix.MNT_DETACH); err != nil { logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err) } } @@ -2263,6 +2331,34 @@ func (devices *DeviceSet) Shutdown(home string) error { return nil } +// Recent XFS changes allow changing behavior of filesystem in case of errors. +// When thin pool gets full and XFS gets ENOSPC error, currently it tries +// IO infinitely and sometimes it can block the container process +// and the process can't be killed. With a 0 value, XFS will not retry upon error +// and will instead shut down the filesystem.
+ +func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error { + dmDevicePath, err := os.Readlink(info.DevName()) + if err != nil { + return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err) + } + + dmDeviceName := path.Base(dmDevicePath) + filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries" + maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0) + if err != nil { + return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err) + } + defer maxRetriesFile.Close() + + // Set max retries to the user-configured value + _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries) + if err != nil { + return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err) + } + return nil +} + // MountDevice mounts the device if not already mounted. func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { info, err := devices.lookupDeviceWithLock(hash) @@ -2300,7 +2396,15 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) + return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256))) + } + + if fstype == "xfs" && devices.xfsNospaceRetries != "" { + if err := devices.xfsSetNospaceRetries(info); err != nil { + unix.Unmount(path, unix.MNT_DETACH) + devices.deactivateDevice(info) + return err + } } return nil @@ -2308,8 +2412,8 @@ // UnmountDevice unmounts the device and removes it from hash. func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { - logrus.Debugf("devmapper: UnmountDevice(hash=%s)", hash) - defer logrus.Debugf("devmapper: UnmountDevice(hash=%s) END", hash) + logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) info, err := devices.lookupDeviceWithLock(hash) if err != nil { @@ -2323,16 +2427,12 @@ defer devices.Unlock() logrus.Debugf("devmapper: Unmount(%s)", mountPath) - if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { + if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil { return err } logrus.Debug("devmapper: Unmount done") - if err := devices.deactivateDevice(info); err != nil { - return err - } - - return nil + return devices.deactivateDevice(info) } // HasDevice returns true if the device metadata exists.
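As an editorial aside, the sysfs mechanism used by xfsSetNospaceRetries above reduces to a small standalone sketch. The knob path below is the one the patch writes to; the device name "dm-3", the value "0", and the helper name setXFSMaxRetries are illustrative assumptions, since a real caller resolves the device-mapper node name with os.Readlink, as the patched code does.

package main

import (
	"fmt"
	"os"
)

// setXFSMaxRetries writes a retry count into the per-device XFS ENOSPC error
// knob. Writing "0" tells XFS to shut the filesystem down instead of retrying
// the failing IO forever.
func setXFSMaxRetries(dmDeviceName, maxRetries string) error {
	knob := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
	f, err := os.OpenFile(knob, os.O_WRONLY, 0)
	if err != nil {
		// Older kernels do not expose this knob at all.
		return fmt.Errorf("xfs error knob not available: %v", err)
	}
	defer f.Close()
	_, err = f.WriteString(maxRetries)
	return err
}

func main() {
	// "dm-3" is a hypothetical device name used only for illustration.
	if err := setXFSMaxRetries("dm-3", "0"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}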
@@ -2424,8 +2524,8 @@ func (devices *DeviceSet) MetadataDevicePath() string { } func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { - buf := new(syscall.Statfs_t) - if err := syscall.Statfs(loopFile, buf); err != nil { + buf := new(unix.Statfs_t) + if err := unix.Statfs(loopFile, buf); err != nil { logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) return 0, err } @@ -2534,22 +2634,25 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ minFreeSpacePercent: defaultMinFreeSpacePercent, } - // Pick up initialization settings, if any were saved before - defaultsFile := path.Join(root, "defaults") - defaultsBytes, err := ioutil.ReadFile(defaultsFile) - defaults := []string{} - settings := map[string]string{} - if err == nil && len(defaultsBytes) > 0 { - defaults = strings.Split(string(defaultsBytes), "\n") + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return nil, graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return nil, graphdriver.ErrNotSupported + } + + if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { + // enable deferred stuff by default + enableDeferredDeletion = true + enableDeferredRemoval = true } foundBlkDiscard := false - nthOption := 0 - for _, option := range append(defaults, options...) { - nthOption = nthOption + 1 - if len(option) == 0 { - continue - } + var lvmSetupConfig directLVMConfig + for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err @@ -2637,15 +2740,78 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ } devices.minFreeSpacePercent = uint32(minFreeSpacePercent) - default: - if nthOption > len(defaults) { - return nil, fmt.Errorf("devmapper: Unknown option %s", key) + case "dm.xfs_nospace_max_retries": + _, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err } - logrus.Errorf("devmapper: Unknown option %s, ignoring", key) + devices.xfsNospaceRetries = val + case "dm.directlvm_device": + lvmSetupConfig.Device = val + case "dm.directlvm_device_force": + lvmSetupConfigForce, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.thinp_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpPercent = per + case "dm.thinp_metapercent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpMetaPercent = per + case "dm.thinp_autoextend_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendPercent = per + case "dm.thinp_autoextend_threshold": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 
10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendThreshold = per + case "dm.libdm_log_level": + level, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val) + } + if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { + return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) + } + // Register a new logging callback with the specified level. + devicemapper.LogInit(devicemapper.DefaultLogger{ + Level: int(level), + }) + default: + return nil, fmt.Errorf("devmapper: Unknown option %s", key) } - settings[key] = val } + if err := validateLVMConfig(lvmSetupConfig); err != nil { + return nil, err + } + + devices.lvmSetupConfig = lvmSetupConfig + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { devices.doBlkDiscard = false @@ -2655,15 +2821,5 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ return nil, err } - // Save these settings along with the other metadata - defaults = []string{} - for key, val := range settings { - defaults = append(defaults, key+"="+val) - } - defaultsBytes = []byte(strings.Join(defaults, "\n") + "\n") - if err := ioutils.AtomicWriteFile(defaultsFile, defaultsBytes, 0600); err != nil { - return nil, err - } - return devices, nil } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go index 87a427a8..d68fb66c 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -14,8 +14,10 @@ import ( "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/devicemapper" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" - "github.com/docker/go-units" + "github.com/containers/storage/pkg/system" + units "github.com/docker/go-units" ) func init() { @@ -29,6 +31,7 @@ type Driver struct { uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter + locker *locker.Locker } // Init creates a driver with the given home and the set of options. 
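The percentage-valued options parsed above (dm.thinp_percent, dm.thinp_metapercent, dm.thinp_autoextend_percent, dm.thinp_autoextend_threshold) all follow the same trim-parse-range-check pattern. Here is a minimal sketch of that pattern; parsePercent is an illustrative name, not part of the library:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePercent strips an optional "%" suffix, parses the remainder as an
// unsigned integer, and rejects values above max.
func parsePercent(opt, val string, max uint64) (uint64, error) {
	per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
	if err != nil {
		return 0, fmt.Errorf("could not parse `%s=%s`: %v", opt, val, err)
	}
	if per > max {
		return 0, fmt.Errorf("%s must not exceed %d", opt, max)
	}
	return per, nil
}

func main() {
	per, err := parsePercent("dm.thinp_percent", "95%", 99)
	fmt.Println(per, err) // prints: 95 <nil>
}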
@@ -48,6 +51,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + locker: locker.New(), } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil @@ -65,18 +69,18 @@ func (d *Driver) Status() [][2]string { status := [][2]string{ {"Pool Name", s.PoolName}, - {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, - {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, + {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))}, + {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))}, {"Backing Filesystem", s.BaseDeviceFS}, {"Data file", s.DataFile}, {"Metadata file", s.MetadataFile}, - {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, - {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, - {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, - {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, - {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, - {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, - {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, + {"Data Space Used", units.HumanSize(float64(s.Data.Used))}, + {"Data Space Total", units.HumanSize(float64(s.Data.Total))}, + {"Data Space Available", units.HumanSize(float64(s.Data.Available))}, + {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))}, + {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))}, + {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))}, + {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))}, {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, @@ -122,12 +126,17 @@ func (d *Driver) Cleanup() error { // CreateReadWrite creates a layer that is writable for use as a container // file system. -func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.Create(id, parent, mountLabel, storageOpt) +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) } // Create adds a device with a given id and the parent. -func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { return err } @@ -137,6 +146,8 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str // Remove removes a device with a given id, unmounts the filesystem. 
func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) if !d.DeviceSet.HasDevice(id) { // Consider removing a non-existing device a no-op // This is useful to be able to progress on container removal @@ -146,19 +157,15 @@ func (d *Driver) Remove(id string) error { // This assumes the device has been properly Get/Put:ed and thus is unmounted if err := d.DeviceSet.DeleteDevice(id, false); err != nil { - return err + return fmt.Errorf("failed to remove device %s: %v", id, err) } - - mp := path.Join(d.home, "mnt", id) - if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { - return err - } - - return nil + return system.EnsureRemoveAll(path.Join(d.home, "mnt", id)) } // Get mounts a device with given id into the root filesystem func (d *Driver) Get(id, mountLabel string) (string, error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) mp := path.Join(d.home, "mnt", id) rootFs := path.Join(mp, "rootfs") if count := d.ctr.Increment(mp); count > 1 { @@ -209,6 +216,8 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { // Put unmounts a device and removes it. func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) mp := path.Join(d.home, "mnt", id) if count := d.ctr.Decrement(mp); count > 0 { return nil @@ -227,6 +236,5 @@ func (d *Driver) Exists(id string) bool { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - var imageStores []string - return imageStores + return nil } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go index cca1fe1b..1dc3262d 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/mount.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/mount.go @@ -7,7 +7,8 @@ import ( "fmt" "os" "path/filepath" - "syscall" + + "golang.org/x/sys/unix" ) // FIXME: this is copy-pasted from the aufs driver. @@ -15,19 +16,17 @@ import ( // Mounted returns true if a mount point exists. func Mounted(mountpoint string) (bool, error) { - mntpoint, err := os.Stat(mountpoint) - if err != nil { + var mntpointSt unix.Stat_t + if err := unix.Stat(mountpoint, &mntpointSt); err != nil { if os.IsNotExist(err) { return false, nil } return false, err } - parent, err := os.Stat(filepath.Join(mountpoint, "..")) - if err != nil { + var parentSt unix.Stat_t + if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil { return false, err } - mntpointSt := mntpoint.Sys().(*syscall.Stat_t) - parentSt := parent.Sys().(*syscall.Stat_t) return mntpointSt.Dev != parentSt.Dev, nil } diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index c16fc33e..56996478 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -2,6 +2,7 @@ package graphdriver import ( "fmt" + "io" "os" "path/filepath" "strings" @@ -28,12 +29,19 @@ var ( // ErrNotSupported returned when driver is not supported. ErrNotSupported = errors.New("driver not supported") - // ErrPrerequisites retuned when driver does not meet prerequisites. + // ErrPrerequisites returned when driver does not meet prerequisites. ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") // ErrIncompatibleFS returned when file system is not supported. 
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") ) +//CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string +} + // InitFunc initializes the storage driver. type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) @@ -47,11 +55,13 @@ type ProtoDriver interface { // String returns a string representation of this driver. String() string // CreateReadWrite creates a new, empty filesystem layer that is ready - // to be used as the storage for a container. - CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error // Create creates a new, empty, filesystem layer with the - // specified id and parent and mountLabel. Parent and mountLabel may be "". - Create(id, parent, mountLabel string, storageOpt map[string]string) error + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error // Remove attempts to remove the filesystem layer with this id. Remove(id string) error // Get returns the mountpoint for the layered filesystem referred @@ -78,26 +88,48 @@ type ProtoDriver interface { AdditionalImageStores() []string } -// Driver is the interface for layered/snapshot file system drivers. -type Driver interface { - ProtoDriver +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". - Diff(id, parent string) (archive.Archive, error) + Diff(id, parent string) (io.ReadCloser, error) // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. Changes(id, parent string) ([]archive.Change, error) // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. - // The archive.Reader must be an uncompressed stream. - ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) + // The io.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. DiffSize(id, parent string) (size int64, err error) } +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + DiffDriver +} + +// Capabilities defines a list of capabilities a driver may implement. +// These capabilities are not required; however, they do determine how a +// graphdriver can be used. +type Capabilities struct { + // Flags that this driver is capable of reproducing exactly equivalent + // diffs for read-only layers. If set, clients can rely on the driver + // for consistent tar streams, and avoid extra processing to account + // for potential differences (eg: the layer store's use of tar-split). + ReproducesExactDiffs bool +} + +// CapabilityDriver is the interface for layered file system drivers that +// can report on their Capabilities. 
+type CapabilityDriver interface { + Capabilities() Capabilities +} + // DiffGetterDriver is the interface for layered file system drivers that // provide a specialized function for getting file contents for tar-split. type DiffGetterDriver interface { @@ -136,15 +168,13 @@ func Register(name string, initFunc InitFunc) error { } // GetDriver initializes and returns the registered driver -func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { +func GetDriver(name string, config Options) (Driver, error) { if initFunc, exists := drivers[name]; exists { - return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) } - if pluginDriver, err := lookupPlugin(name, home, options); err == nil { - return pluginDriver, nil - } - logrus.Errorf("Failed to GetDriver graph %s %s", name, home) - return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, home) + + logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root) + return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root) } // getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins @@ -156,15 +186,24 @@ func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []id return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home) } +// Options is used to initialize a graphdriver +type Options struct { + Root string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + // New creates the driver and initializes it at the specified root. -func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { +func New(name string, config Options) (Driver, error) { if name != "" { logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver - return GetDriver(name, root, options, uidMaps, gidMaps) + return GetDriver(name, config) } // Guess for prior driver - driversMap := scanPriorDrivers(root) + driversMap := scanPriorDrivers(config.Root) for _, name := range priority { if name == "vfs" { // don't use vfs even if there is state present. @@ -173,13 +212,13 @@ func New(root string, name string, options []string, uidMaps, gidMaps []idtools. if _, prior := driversMap[name]; prior { // of the state found from prior drivers, check in order of our priority // which we would prefer - driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) if err != nil { // unlike below, we will return error here, because there is prior // state, and now it is no longer supported/prereq/compatible, so // something changed and needs attention. Otherwise the daemon's // images would just "disappear". - logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err) + logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) return nil, err } @@ -191,17 +230,17 @@ func New(root string, name string, options []string, uidMaps, gidMaps []idtools. 
driversSlice = append(driversSlice, name) } - return nil, fmt.Errorf("%q contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", root, strings.Join(driversSlice, ", ")) + return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) } - logrus.Infof("[graphdriver] using prior storage driver %q", name) + logrus.Infof("[graphdriver] using prior storage driver: %s", name) return driver, nil } } // Check for priority drivers first for _, name := range priority { - driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) if err != nil { if isDriverNotSupported(err) { continue @@ -213,7 +252,7 @@ func New(root string, name string, options []string, uidMaps, gidMaps []idtools. // Check all registered drivers if no priority driver is found for name, initFunc := range drivers { - driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps) + driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) if err != nil { if isDriverNotSupported(err) { continue diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go index 2891a84f..53394b73 100644 --- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go @@ -1,6 +1,10 @@ package graphdriver -import "syscall" +import ( + "golang.org/x/sys/unix" +) var ( // Slice of drivers that should be used in an order @@ -11,7 +15,7 @@ var ( // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { - var buf syscall.Statfs_t - if err := syscall.Statfs(mountPath, &buf); err != nil { + var buf unix.Statfs_t + if err := unix.Statfs(mountPath, &buf); err != nil { return false, err } diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go index 6e1f2ee3..94f7270e 100644 --- a/vendor/github.com/containers/storage/drivers/driver_linux.go +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -4,9 +4,9 @@ package graphdriver import ( "path/filepath" - "syscall" "github.com/containers/storage/pkg/mount" + "golang.org/x/sys/unix" ) const ( @@ -66,13 +66,14 @@ var ( FsMagicAufs: "aufs", FsMagicBtrfs: "btrfs", FsMagicCramfs: "cramfs", + FsMagicEcryptfs: "ecryptfs", FsMagicExtfs: "extfs", FsMagicF2fs: "f2fs", FsMagicGPFS: "gpfs", FsMagicJffs2Fs: "jffs2", FsMagicJfs: "jfs", FsMagicNfsFs: "nfs", - FsMagicOverlay: "overlay", + FsMagicOverlay: "overlayfs", FsMagicRAMFs: "ramfs", FsMagicReiserFs: "reiserfs", FsMagicSmbFs: "smb", @@ -87,14 +88,14 @@ var ( // GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) { - var buf syscall.Statfs_t - if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { + var buf unix.Statfs_t + if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil { return 0, err } return FsMagic(buf.Type), nil } -// NewFsChecker returns a checker configured for the provied FsMagic +// NewFsChecker returns a checker configured for the provided FsMagic func NewFsChecker(t FsMagic) Checker { return &fsChecker{ t: t, @@ -126,8 +127,8 @@ func (c *defaultChecker) IsMounted(path string) bool { // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { - var buf syscall.Statfs_t - if err := syscall.Statfs(mountPath, &buf); err != nil { + var buf unix.Statfs_t + if err := unix.Statfs(mountPath, &buf); err != nil { return false, err } return FsMagic(buf.Type) == fsType, nil diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go index cfbc26e8..174fa967 100644 --- a/vendor/github.com/containers/storage/drivers/driver_solaris.go +++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go @@ -19,8 +19,8 @@ import ( "path/filepath" "unsafe" - "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "github.com/containers/storage/pkg/mount" + "github.com/sirupsen/logrus" ) const ( @@ -45,22 +45,52 @@ func GetFSMagic(rootpath string) (FsMagic, error) { return 0, nil } +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewFsChecker returns a checker configured for the provided FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +// No-op on Solaris. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + // Mounted checks if the given path is mounted as the fs type //Solaris supports only ZFS for now func Mounted(fsType FsMagic, mountPath string) (bool, error) { cs := C.CString(filepath.Dir(mountPath)) + defer C.free(unsafe.Pointer(cs)) buf := C.getstatfs(cs) + defer C.free(unsafe.Pointer(buf)) // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... 
] if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || (buf.f_basetype[3] != 0) { - log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) - C.free(unsafe.Pointer(buf)) - return false, errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", mountPath) + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) + return false, ErrPrerequisites } - C.free(unsafe.Pointer(buf)) - C.free(unsafe.Pointer(cs)) return true, nil } diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index 69310729..48a1f078 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -1,14 +1,14 @@ package graphdriver import ( + "io" "time" - "github.com/sirupsen/logrus" - "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" + "github.com/sirupsen/logrus" ) var ( @@ -31,30 +31,30 @@ type NaiveDiffDriver struct { // NewNaiveDiffDriver returns a fully functional driver that wraps the // given ProtoDriver and adds the capability of the following methods which // it may or may not support on its own: -// Diff(id, parent string) (archive.Archive, error) +// Diff(id, parent string) (io.ReadCloser, error) // Changes(id, parent string) ([]archive.Change, error) -// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) // DiffSize(id, parent string) (size int64, err error) func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { - gdw := &NaiveDiffDriver{ - ProtoDriver: driver, - uidMaps: uidMaps, - gidMaps: gidMaps, - } - return gdw + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". -func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { - layerFs, err := gdw.Get(id, "") +func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") if err != nil { return nil, err } defer func() { if err != nil { - gdw.Put(id) + driver.Put(id) } }() @@ -65,16 +65,16 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err e } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() - gdw.Put(id) + driver.Put(id) return err }), nil } - parentFs, err := gdw.Get(parent, "") + parentFs, err := driver.Get(parent, "") if err != nil { return nil, err } - defer gdw.Put(parent) + defer driver.Put(parent) changes, err := archive.ChangesDirs(layerFs, parentFs) if err != nil { @@ -88,7 +88,13 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err e return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() - gdw.Put(id) + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tars with full-second precision on modified time. + // We need this hack here to make sure calls within the same second receive + // a correct result.
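// Editorial note on the sleep that follows: Truncate rounds startTime down to
// the whole second, Add moves to the next second boundary, and Sub measures
// the remaining gap from now. A Diff that began at, say, 10:00:00.300 will
// therefore not return before 10:00:01.000, so a later extraction cannot land
// in the same mtime second.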
+ time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) return err }), nil } @@ -96,20 +102,22 @@ // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { - layerFs, err := gdw.Get(id, "") + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") if err != nil { return nil, err } - defer gdw.Put(id) + defer driver.Put(id) parentFs := "" if parent != "" { - parentFs, err = gdw.Get(parent, "") + parentFs, err = driver.Get(parent, "") if err != nil { return nil, err } - defer gdw.Put(parent) + defer driver.Put(parent) } return archive.ChangesDirs(layerFs, parentFs) @@ -118,13 +126,15 @@ // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. -func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + // Mount the root filesystem so we can apply the diff/layer. - layerFs, err := gdw.Get(id, "") + layerFs, err := driver.Get(id, "") if err != nil { return } - defer gdw.Put(id) + defer driver.Put(id) options := &archive.TarOptions{UIDMaps: gdw.uidMaps, GIDMaps: gdw.gidMaps} @@ -142,16 +152,18 @@ // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + driver := gdw.ProtoDriver + changes, err := gdw.Changes(id, parent) if err != nil { return } - layerFs, err := gdw.Get(id, "") + layerFs, err := driver.Get(id, "") if err != nil { return } - defer gdw.Put(id) + defer driver.Put(id) return archive.ChangesSize(layerFs, changes), nil } diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go new file mode 100644 index 00000000..2a096edf --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -0,0 +1,102 @@ +// +build linux + +package overlay + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/system" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// doesSupportNativeDiff checks whether the filesystem has a bug +// which copies up the opaque flag when copying up an opaque +// directory, or whether the kernel enables CONFIG_OVERLAY_FS_REDIRECT_DIR. +// When either is the case, the naive diff driver should be used.
+func doesSupportNativeDiff(d string) error { + td, err := ioutil.TempDir(d, "opaque-bug-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1/d, l1/d1, l2/d, l3, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return err + } + + // Mark l2/d as opaque + if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + return errors.Wrap(err, "failed to set opaque flag on middle layer") + } + + opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + + // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" + if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + return errors.Wrap(err, "failed to write to merged directory") + } + + // Check l3/d does not have opaque flag + xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque") + if err != nil { + return errors.Wrap(err, "failed to read opaque flag on upper layer") + } + if string(xattrOpaque) == "y" { + return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") + } + + // rename "d1" to "d2" + if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil { + // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled + if err.(*os.LinkError).Err == syscall.EXDEV { + return nil + } + return errors.Wrap(err, "failed to rename dir in merged directory") + } + // get the xattr of "d2" + xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect") + if err != nil { + return errors.Wrap(err, "failed to read redirect flag on upper layer") + } + + if string(xattrRedirect) == "d1" { + return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled") + } + + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go index 1b53f0c8..feb03959 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -9,9 +9,9 @@ import ( "fmt" "os" "runtime" - "syscall" "github.com/containers/storage/pkg/reexec" + "golang.org/x/sys/unix" ) func init() { @@ -31,12 +31,12 @@ type mountOptions struct { Flag uint32 } -func mountFrom(dir, device, target, mType, label string) error { +func mountFrom(dir, device, target, mType string, 
flags uintptr, label string) error { options := &mountOptions{ Device: device, Target: target, Type: mType, - Flag: 0, + Flag: uint32(flags), Label: label, } @@ -49,18 +49,19 @@ func mountFrom(dir, device, target, mType, label string) error { output := bytes.NewBuffer(nil) cmd.Stdout = output cmd.Stderr = output - if err := cmd.Start(); err != nil { + w.Close() return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) } //write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) } w.Close() if err := cmd.Wait(); err != nil { - return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output) + return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output) } return nil } @@ -80,7 +81,7 @@ func mountFromMain() { fatal(err) } - if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { fatal(err) } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index f3cb27e2..4974a94e 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -3,8 +3,8 @@ package overlay import ( - "bufio" "fmt" + "io" "io/ioutil" "os" "os/exec" @@ -12,21 +12,25 @@ import ( "path/filepath" "strconv" "strings" - "syscall" - - "github.com/sirupsen/logrus" + "sync" "github.com/containers/storage/drivers" + "github.com/containers/storage/drivers/overlayutils" + "github.com/containers/storage/drivers/quota" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fsutils" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/parsers/kernel" - + "github.com/containers/storage/pkg/system" + units "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) var ( @@ -42,7 +46,7 @@ var ( // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers -// below as well as "merged" and "work" directories. The "diff" directory +// below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost @@ -76,47 +80,48 @@ const ( idLength = 26 ) +type overlayOptions struct { + overrideKernelCheck bool + imageStores []string + quota quota.Quota +} + // Driver contains information about the home directory and the list of active mounts that are created using this driver. 
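The mountFrom changes above close the write end of the pipe on every error path; the overall pattern is a re-exec'd child that receives its mount options as JSON over an inherited pipe. A minimal, hypothetical parent-side sketch of that pattern (the real code uses pkg/reexec and its own child entrypoint):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

// mountOptions mirrors the struct the parent JSON-encodes into the pipe.
type mountOptions struct {
	Device string
	Target string
	Type   string
	Label  string
	Flag   uint32
}

// sendToChild starts a re-exec'd copy of the current binary and streams
// the options to it; the child inherits the read end of the pipe as fd 3.
// The child-side decoding is not shown here.
func sendToChild(opts *mountOptions) error {
	r, w, err := os.Pipe()
	if err != nil {
		return err
	}
	defer r.Close()

	cmd := exec.Command("/proc/self/exe", "storage-mountfrom-child")
	cmd.ExtraFiles = []*os.File{r} // becomes fd 3 in the child
	if err := cmd.Start(); err != nil {
		w.Close() // close the write end on every exit path, as the fix above does
		return fmt.Errorf("starting re-exec child: %v", err)
	}
	if err := json.NewEncoder(w).Encode(opts); err != nil {
		w.Close()
		return fmt.Errorf("encoding options: %v", err)
	}
	w.Close() // EOF tells the child's decoder it has everything
	return cmd.Wait()
}
```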
type Driver struct { - name string - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter - opts *overlayOptions + name string + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + quotaCtl *quota.Control + options overlayOptions + naiveDiff graphdriver.DiffDriver + supportsDType bool + locker *locker.Locker } -var backingFs = "" +var ( + backingFs = "" + projectQuotaSupported = false + + useNaiveDiffLock sync.Once + useNaiveDiffOnly bool +) func init() { - graphdriver.Register("overlay", InitAsOverlay) - graphdriver.Register("overlay2", InitAsOverlay2) + graphdriver.Register("overlay", Init) + graphdriver.Register("overlay2", Init) } -// InitWithName returns the a naive diff driver for the overlay filesystem, -// which returns the passed-in name when asked which driver it is. -func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { +// Init returns the a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } - if err := supportsOverlay(); err != nil { - return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs") - } - - // require kernel 4.0.0 to ensure multiple lower dirs are supported - v, err := kernel.GetKernelVersion() - if err != nil { - return nil, err - } - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { - if !opts.overrideKernelCheck { - return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") - } - logrus.Warnf("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update") - } - fsMagic, err := graphdriver.GetFSMagic(home) if err != nil { return nil, err @@ -127,7 +132,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs switch fsMagic { - case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: logrus.Errorf("'overlay' is not supported over %s", backingFs) return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs) } @@ -136,6 +141,12 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool if err != nil { return nil, err } + + supportsDType, err := supportsOverlay(home, fsMagic, rootUID, rootGID) + if err != nil { + return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs") + } + // Create the driver home dir if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return nil, err @@ -146,36 +157,35 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool } d := &Driver{ - name: name, - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - ctr: 
graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), - opts: opts, + name: "overlay", + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + locker: locker.New(), + options: *opts, } + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) + + if backingFs == "xfs" { + // Try to enable project quota support over xfs. + if d.quotaCtl, err = quota.NewControl(home); err == nil { + projectQuotaSupported = true + } else if opts.quota.Size > 0 { + return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err) + } + } else if opts.quota.Size > 0 { + // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. + return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs) + } + + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) + return d, nil } -// InitAsOverlay returns the a naive diff driver for overlay filesystem. -// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. -// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned. -func InitAsOverlay(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - return InitWithName("overlay", home, options, uidMaps, gidMaps) -} - -// InitAsOverlay2 returns the a naive diff driver for overlay filesystem. -// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. -// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned. 
-func InitAsOverlay2(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - return InitWithName("overlay2", home, options, uidMaps, gidMaps) -} - -type overlayOptions struct { - overrideKernelCheck bool - imageStores []string -} - func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { @@ -185,12 +195,21 @@ func parseOptions(options []string) (*overlayOptions, error) { } key = strings.ToLower(key) switch key { - case "overlay.override_kernel_check", "overlay2.override_kernel_check": + case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check": + logrus.Debugf("overlay: override_kernelcheck=%s", val) o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } - case "overlay.imagestore": + case ".size", "overlay.size", "overlay2.size": + logrus.Debugf("overlay: size=%s", val) + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + o.quota.Size = uint64(size) + case ".imagestore", "overlay.imagestore", "overlay2.imagestore": + logrus.Debugf("overlay: imagestore=%s", val) // Additional read only image stores to use for lower paths for _, store := range strings.Split(val, ",") { store = filepath.Clean(store) @@ -199,7 +218,7 @@ func parseOptions(options []string) (*overlayOptions, error) { } st, err := os.Stat(store) if err != nil { - return nil, fmt.Errorf("overlay: Can't stat imageStore dir %s: %v", store, err) + return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err) } if !st.IsDir() { return nil, fmt.Errorf("overlay: image path %q must be a directory", store) @@ -213,25 +232,69 @@ func parseOptions(options []string) (*overlayOptions, error) { return o, nil } -func supportsOverlay() error { - // We can try to modprobe overlay first before looking at - // proc/filesystems for when overlay is supported +func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { + // We can try to modprobe overlay first exec.Command("modprobe", "overlay").Run() - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if s.Text() == "nodev\toverlay" { - return nil + layerDir, err := ioutil.TempDir(home, "compat") + if err == nil { + // Check if reading the directory's contents populates the d_type field, which is required + // for proper operation of the overlay filesystem. + supportsDType, err = fsutils.SupportsDType(layerDir) + if err != nil { + return false, err } + if !supportsDType { + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) + // TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4 + // return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs) + } + + // Try a test mount in the specific location we're looking at using. 
+ mergedDir := filepath.Join(layerDir, "merged") + lower1Dir := filepath.Join(layerDir, "lower1") + lower2Dir := filepath.Join(layerDir, "lower2") + defer func() { + // Permitted to fail, since the various subdirectories + // can be empty or not even there, and the home might + // legitimately be not empty + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + _ = os.RemoveAll(layerDir) + _ = os.Remove(home) + }() + _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID) + flags := fmt.Sprintf("lowerdir=%s:%s", lower1Dir, lower2Dir) + if len(flags) < unix.Getpagesize() { + if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil { + logrus.Debugf("overlay test mount with multiple lowers succeeded") + return supportsDType, nil + } + } + flags = fmt.Sprintf("lowerdir=%s", lower1Dir) + if len(flags) < unix.Getpagesize() { + if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil { + logrus.Errorf("overlay test mount with multiple lowers failed, but succeeded with a single lower") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") + } + } + logrus.Errorf("'overlay' is not supported over %s at %q", backingFs, home) + return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home) } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") - return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") +} + +func useNaiveDiff(home string) bool { + useNaiveDiffLock.Do(func() { + if err := doesSupportNativeDiff(home); err != nil { + logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err) + useNaiveDiffOnly = true + } + }) + return useNaiveDiffOnly } func (d *Driver) String() string { @@ -243,6 +306,8 @@ func (d *Driver) String() string { func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, } } @@ -280,18 +345,39 @@ func (d *Driver) Cleanup() error { // CreateReadWrite creates a layer that is writable for use as a container // file system. 
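supportsOverlay and Get both size-check the overlay option string, because the kernel rejects mount data larger than one page. A rough standalone sketch of that guard, assuming standard overlayfs option syntax:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/unix"
)

// overlayMountData builds lowerdir/upperdir/workdir options and refuses
// strings that cannot fit in a single page of mount data.
func overlayMountData(lowers []string, upper, work string) (string, error) {
	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
		strings.Join(lowers, ":"), upper, work)
	if len(opts) >= unix.Getpagesize() {
		return "", fmt.Errorf("mount options too large (%d bytes)", len(opts))
	}
	return opts, nil
}
```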
-func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.Create(id, parent, mountLabel, storageOpt) +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + if opts == nil { + opts = &graphdriver.CreateOpts{ + StorageOpt: map[string]string{}, + } + } + + if _, ok := opts.StorageOpt["size"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) + } + + return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. -func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) { - - if len(storageOpt) != 0 { - return fmt.Errorf("--storage-opt is not supported for overlay") +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + if opts != nil && len(opts.StorageOpt) != 0 { + if _, ok := opts.StorageOpt["size"]; ok { + return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") + } } + return d.create(id, parent, opts) +} +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) @@ -312,6 +398,20 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str } }() + if opts != nil && len(opts.StorageOpt) > 0 { + driver := &Driver{} + if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { + return err + } + + if driver.options.quota.Size > 0 { + // Set container disk quota limit + if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { + return err + } + } + } + if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { return err } @@ -351,6 +451,26 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str return nil } +// Parse overlay storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to set the disk project quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.quota.Size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) @@ -377,11 +497,11 @@ func (d *Driver) getLower(parent string) (string, error) { return strings.Join(lowers, ":"), nil } -func (d *Driver) dir(val string) string { - newpath := path.Join(d.home, val) +func (d *Driver) dir(id string) string { + newpath := path.Join(d.home, id) if _, err := os.Stat(newpath); err != nil { for _, p := range d.AdditionalImageStores() { - l := path.Join(p, d.name, val) + l := path.Join(p, d.name, id) _, err = os.Stat(l) if err == nil { return l @@ -411,6 +531,8 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) { // Remove cleans the directories that are created for this id. 
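The new "size" option above is parsed with units.RAMInBytes, which accepts human-readable suffixes with 1024-based multipliers. A small sketch of that conversion (the wrapper function is hypothetical):

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

// parseSize converts a human-readable size such as "20G" into the uint64
// the quota code expects. RAMInBytes is 1024-based, so "20G" yields
// 20 * 1024 * 1024 * 1024 bytes.
func parseSize(val string) (uint64, error) {
	size, err := units.RAMInBytes(val)
	if err != nil {
		return 0, fmt.Errorf("invalid size %q: %v", val, err)
	}
	return uint64(size), nil
}
```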
 func (d *Driver) Remove(id string) error {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
 	dir := d.dir(id)
 	lid, err := ioutil.ReadFile(path.Join(dir, "link"))
 	if err == nil {
 		}
 	}
@@ -419,14 +541,16 @@
-	if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
+	if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) {
 		return err
 	}
 	return nil
 }

 // Get creates and mounts the required file system for the given id and returns the mount path.
-func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
+func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
 	dir := d.dir(id)
 	if _, err := os.Stat(dir); err != nil {
 		return "", err
 	}
@@ -458,7 +582,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
 			return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err)
 		}
 	} else {
-		lower = l
+		lower = newpath
 	}
 	if newlowers == "" {
 		newlowers = lower
@@ -472,22 +596,42 @@
 		return mergedDir, nil
 	}
 	defer func() {
-		if err != nil {
+		if retErr != nil {
 			if c := d.ctr.Decrement(mergedDir); c <= 0 {
-				syscall.Unmount(mergedDir, 0)
+				if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
+					logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr)
+				}
 			}
 		}
 	}()

 	workDir := path.Join(dir, "work")
-	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, path.Join(id, "diff"), path.Join(id, "work"))
-	mountLabel = label.FormatMountLabel(opts, mountLabel)
-	if len(mountLabel) > syscall.Getpagesize() {
-		return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel))
-	}
+	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, diffDir, workDir)
+	mountData := label.FormatMountLabel(opts, mountLabel)
+	mount := unix.Mount
+	mountTarget := mergedDir

-	if err := mountFrom(d.home, "overlay", path.Join(id, "merged"), "overlay", mountLabel); err != nil {
-		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
+	pageSize := unix.Getpagesize()
+
+	// Use relative paths and mountFrom when the mount data has exceeded
+	// the page size. The mount syscall fails if the mount data cannot
+	// fit within a page and relative links make the mount data much
+	// smaller at the expense of requiring a fork exec to chroot.
+	if len(mountData) > pageSize {
+		// FIXME: We need to figure out how to get this to work with additional stores
+		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
+		mountData = label.FormatMountLabel(opts, mountLabel)
+		if len(mountData) > pageSize {
+			return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
+		}
+
+		mount = func(source string, target string, mType string, flags uintptr, label string) error {
+			return mountFrom(d.home, source, target, mType, flags, label)
+		}
+		mountTarget = path.Join(id, "merged")
+	}
+	if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil {
+		return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err)
 	}

 	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
@@ -506,19 +650,27 @@
 // Put unmounts the mount path created for the given id.
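Remove, Get, and Put now take a per-id lock from pkg/locker. A simplified sketch of the idea behind that package; the real implementation additionally reference-counts entries so idle mutexes can be dropped:

```go
package main

import "sync"

// Locker hands out one mutex per id, so operations on different layers
// can proceed in parallel while operations on the same layer serialize.
type Locker struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func NewLocker() *Locker {
	return &Locker{locks: make(map[string]*sync.Mutex)}
}

func (l *Locker) Lock(id string) {
	l.mu.Lock()
	m, ok := l.locks[id]
	if !ok {
		m = &sync.Mutex{}
		l.locks[id] = m
	}
	l.mu.Unlock()
	m.Lock()
}

// Unlock assumes a matching Lock(id) happened first.
func (l *Locker) Unlock(id string) {
	l.mu.Lock()
	m := l.locks[id]
	l.mu.Unlock()
	m.Unlock()
}
```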
func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return err + } mountpoint := path.Join(d.dir(id), "merged") if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - err := syscall.Unmount(mountpoint, 0) - if err != nil { - if _, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)); err != nil { - // We didn't have a "lower" directory, so we weren't mounting a "merged" directory anyway + if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil { + // If no lower, we used the diff directory, so no work to do + if os.IsNotExist(err) { return nil } - logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + return err } - return err + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + } + return nil } // Exists checks to see if the id is already mounted. @@ -527,8 +679,33 @@ func (d *Driver) Exists(id string) bool { return err == nil } +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (d *Driver) isParent(id, parent string) bool { + lowers, err := d.getLowerDirs(id) + if err != nil { + return false + } + if parent == "" && len(lowers) > 0 { + return false + } + + parentDir := d.dir(parent) + var ld string + if len(lowers) > 0 { + ld = filepath.Dir(lowers[0]) + } + if ld == "" && parent == "" { + return true + } + return ld == parentDir +} + // ApplyDiff applies the new layer into a root -func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) { +func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { + if !d.isParent(id, parent) { + return d.naiveDiff.ApplyDiff(id, parent, diff) + } + applyDir := d.getDiffPath(id) logrus.Debugf("Applying tar in %s", applyDir) @@ -541,7 +718,7 @@ func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size return 0, err } - return d.DiffSize(id, parent) + return directory.Size(applyDir) } func (d *Driver) getDiffPath(id string) string { @@ -554,12 +731,19 @@ func (d *Driver) getDiffPath(id string) string { // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.DiffSize(id, parent) + } return directory.Size(d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". -func (d *Driver) Diff(id, parent string) (archive.Archive, error) { +func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Diff(id, parent) + } + diffPath := d.getDiffPath(id) logrus.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ @@ -573,6 +757,9 @@ func (d *Driver) Diff(id, parent string) (archive.Archive, error) { // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Changes(id, parent) + } // Overlay doesn't have snapshots, so we need to get changes from all parent // layers. 
diffPath := d.getDiffPath(id) @@ -586,5 +773,5 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - return d.opts.imageStores + return d.options.imageStores } diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go index 975b3a50..fc565ef0 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/randomid.go +++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go @@ -12,6 +12,7 @@ import ( "time" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) // generateID creates a new random string identifier with the given length @@ -69,7 +70,7 @@ func retryOnError(err error) bool { case *os.PathError: return retryOnError(err.Err) // unpack the target error case syscall.Errno: - if err == syscall.EPERM { + if err == unix.EPERM { // EPERM represents an entropy pool exhaustion, a condition under // which we backoff and retry. return true diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go new file mode 100644 index 00000000..46773364 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go @@ -0,0 +1,18 @@ +// +build linux + +package overlayutils + +import ( + "errors" + "fmt" +) + +// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. +func ErrDTypeNotSupported(driver, backingFs string) error { + msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support." + } + msg += " Running without d_type is not supported." + return errors.New(msg) +} diff --git a/vendor/github.com/containers/storage/drivers/plugin.go b/vendor/github.com/containers/storage/drivers/plugin.go deleted file mode 100644 index a76aae6e..00000000 --- a/vendor/github.com/containers/storage/drivers/plugin.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build experimental - -package graphdriver - -import ( - "fmt" - "io" - - "github.com/containers/storage/pkg/plugins" -) - -type pluginClient interface { - // Call calls the specified method with the specified arguments for the plugin. 
- Call(string, interface{}, interface{}) error - // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream - Stream(string, interface{}) (io.ReadCloser, error) - // SendFile calls the specified method, and passes through the IO stream - SendFile(string, io.Reader, interface{}) error -} - -func lookupPlugin(name, home string, opts []string) (Driver, error) { - pl, err := plugins.Get(name, "GraphDriver") - if err != nil { - return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) - } - return newPluginDriver(name, home, opts, pl.Client()) -} - -func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) { - proxy := &graphDriverProxy{name, c} - return proxy, proxy.Init(home, opts) -} diff --git a/vendor/github.com/containers/storage/drivers/plugin_unsupported.go b/vendor/github.com/containers/storage/drivers/plugin_unsupported.go deleted file mode 100644 index daa7a170..00000000 --- a/vendor/github.com/containers/storage/drivers/plugin_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !experimental - -package graphdriver - -func lookupPlugin(name, home string, opts []string) (Driver, error) { - return nil, ErrNotSupported -} diff --git a/vendor/github.com/containers/storage/drivers/proxy.go b/vendor/github.com/containers/storage/drivers/proxy.go deleted file mode 100644 index d56b8731..00000000 --- a/vendor/github.com/containers/storage/drivers/proxy.go +++ /dev/null @@ -1,225 +0,0 @@ -// +build experimental - -package graphdriver - -import ( - "fmt" - - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" -) - -type graphDriverProxy struct { - name string - client pluginClient -} - -type graphDriverRequest struct { - ID string `json:",omitempty"` - Parent string `json:",omitempty"` - MountLabel string `json:",omitempty"` -} - -type graphDriverResponse struct { - Err string `json:",omitempty"` - Dir string `json:",omitempty"` - Exists bool `json:",omitempty"` - Status [][2]string `json:",omitempty"` - Changes []archive.Change `json:",omitempty"` - Size int64 `json:",omitempty"` - Metadata map[string]string `json:",omitempty"` -} - -type graphDriverInitRequest struct { - Home string - Opts []string -} - -func (d *graphDriverProxy) Init(home string, opts []string) error { - args := &graphDriverInitRequest{ - Home: home, - Opts: opts, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) String() string { - return d.name -} - -func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - MountLabel: mountLabel, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - MountLabel: mountLabel, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Remove(id string) error { - args := &graphDriverRequest{ID: id} - var ret 
graphDriverResponse - if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { - args := &graphDriverRequest{ - ID: id, - MountLabel: mountLabel, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { - return "", err - } - var err error - if ret.Err != "" { - err = errors.New(ret.Err) - } - return ret.Dir, err -} - -func (d *graphDriverProxy) Put(id string) error { - args := &graphDriverRequest{ID: id} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Exists(id string) bool { - args := &graphDriverRequest{ID: id} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { - return false - } - return ret.Exists -} - -func (d *graphDriverProxy) Status() [][2]string { - args := &graphDriverRequest{} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { - return nil - } - return ret.Status -} - -func (d *graphDriverProxy) Metadata(id string) (map[string]string, error) { - args := &graphDriverRequest{ - ID: id, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Metadata", args, &ret); err != nil { - return nil, err - } - if ret.Err != "" { - return nil, errors.New(ret.Err) - } - return ret.Metadata, nil -} - -func (d *graphDriverProxy) Cleanup() error { - args := &graphDriverRequest{} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { - return nil - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - body, err := d.client.Stream("GraphDriver.Diff", args) - if err != nil { - return nil, err - } - return archive.Archive(body), nil -} - -func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { - return nil, err - } - if ret.Err != "" { - return nil, errors.New(ret.Err) - } - - return ret.Changes, nil -} - -func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { - var ret graphDriverResponse - if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { - return -1, err - } - if ret.Err != "" { - return -1, errors.New(ret.Err) - } - return ret.Size, nil -} - -func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { - return -1, err - } - if ret.Err != "" { - return -1, errors.New(ret.Err) - } - return ret.Size, nil -} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go new file mode 100644 index 00000000..93e74437 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -0,0 +1,337 @@ +// 
+build linux
+
+//
+// projectquota.go - implements XFS project quota controls
+// for setting quota limits on a newly created directory.
+// It currently supports the legacy XFS specific ioctls.
+//
+// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
+// for both xfs/ext4 for kernel version >= v4.5
+//
+
+package quota
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/dqblk_xfs.h>
+
+#ifndef FS_XFLAG_PROJINHERIT
+struct fsxattr {
+	__u32		fsx_xflags;
+	__u32		fsx_extsize;
+	__u32		fsx_nextents;
+	__u32		fsx_projid;
+	unsigned char	fsx_pad[12];
+};
+#define FS_XFLAG_PROJINHERIT	0x00000200
+#endif
+#ifndef FS_IOC_FSGETXATTR
+#define FS_IOC_FSGETXATTR	_IOR ('X', 31, struct fsxattr)
+#endif
+#ifndef FS_IOC_FSSETXATTR
+#define FS_IOC_FSSETXATTR	_IOW ('X', 32, struct fsxattr)
+#endif
+
+#ifndef PRJQUOTA
+#define PRJQUOTA	2
+#endif
+#ifndef XFS_PROJ_QUOTA
+#define XFS_PROJ_QUOTA	2
+#endif
+#ifndef Q_XSETPQLIM
+#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
+#endif
+#ifndef Q_XGETPQUOTA
+#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
+#endif
+*/
+import "C"
+import (
+	"fmt"
+	"io/ioutil"
+	"path"
+	"path/filepath"
+	"unsafe"
+
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+// Quota limit params - currently we only control blocks hard limit
+type Quota struct {
+	Size uint64
+}
+
+// Control - Context to be used by storage driver (e.g. overlay)
+// who wants to apply project quotas to container dirs
+type Control struct {
+	backingFsBlockDev string
+	nextProjectID     uint32
+	quotas            map[string]uint32
+}
+
+// NewControl - initialize project quota support.
+// Test to make sure that quota can be set on a test dir and find
+// the first project id to be used for the next container create.
+//
+// Returns nil (and error) if project quota is not supported.
+//
+// First get the project id of the home directory.
+// This test will fail if the backing fs is not xfs.
+//
+// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
+//	echo 999:/var/lib/containers/storage/overlay >> /etc/projects
+//	echo storage:999 >> /etc/projid
+//	xfs_quota -x -c 'project -s storage' /<xfs mount point>
+//
+// In that case, the home directory project id will be used as a "start offset"
+// and all containers will be assigned larger project ids (e.g. >= 1000).
+// This is a way to prevent xfs_quota management from conflicting with containers/storage.
+//
+// Then try to create a test directory with the next project id and set a quota
+// on it. If that works, continue to scan existing containers to map allocated
+// project ids.
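Taken together, the quota API this file introduces is used roughly as follows (the path and the 1 GiB limit are illustrative, not taken from the diff):

```go
package main

import (
	"log"

	"github.com/containers/storage/drivers/quota"
)

func main() {
	// NewControl fails when the backing filesystem cannot do project
	// quotas (e.g. xfs mounted without the pquota option).
	ctl, err := quota.NewControl("/var/lib/containers/storage/overlay")
	if err != nil {
		log.Fatalf("project quotas unavailable: %v", err)
	}
	// Cap a layer directory at 1 GiB; Size is in bytes.
	q := quota.Quota{Size: 1 << 30}
	if err := ctl.SetQuota("/var/lib/containers/storage/overlay/some-layer", q); err != nil {
		log.Fatalf("setting quota: %v", err)
	}
}
```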
+// +func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + minProjectID++ + + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + } + if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { + return nil, err + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: make(map[string]uint32), + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID(basePath) + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas[targetPath] = projectID + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) + return setProjectQuota(q.backingFsBlockDev, projectID, quota) +} + +// setProjectQuota - set the quota for project id on xfs block device +func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.XFS_PROJ_QUOTA + + d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + + var cs = C.CString(backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", + projectID, backingFsBlockDev, errno.Error()) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + return fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + var d C.fs_disk_quota_t + + var cs = C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", + projectID, q.backingFsBlockDev, errno.Error()) + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + + return nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + 
var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID(home string) error { + files, err := ioutil.ReadDir(home) + if err != nil { + return fmt.Errorf("read directory failed : %s", home) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(home, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas[path] = projid + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + var stat unix.Stat_t + if err := unix.Stat(home, &stat); err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, "backingFsBlockDev") + // Re-create just in case someone copied the home directory over to a new device + unix.Unlink(backingFsBlockDev) + if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index ff7a88f1..ae62207d 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -4,17 +4,18 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" - + "github.com/containers/storage/pkg/system" "github.com/opencontainers/selinux/go-selinux/label" ) var ( // CopyWithTar defines the copy method to use. 
- CopyWithTar = chrootarchive.CopyWithTar + CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar ) func init() { @@ -25,16 +26,22 @@ func init() { // This sets the home directory for the driver and returns NaiveDiffDriver. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { d := &Driver{ - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, + homes: []string{home}, + idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { return nil, err } - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { - return nil, err + for _, option := range options { + if strings.HasPrefix(option, "vfs.imagestore=") { + d.homes = append(d.homes, strings.Split(option[15:], ",")...) + continue + } + if strings.HasPrefix(option, ".imagestore=") { + d.homes = append(d.homes, strings.Split(option[12:], ",")...) + continue + } } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil } @@ -44,9 +51,8 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. // Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver type Driver struct { - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap + homes []string + idMappings *idtools.IDMappings } func (d *Driver) String() string { @@ -70,29 +76,26 @@ func (d *Driver) Cleanup() error { // CreateReadWrite creates a layer that is writable for use as a container // file system. -func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.Create(id, parent, mountLabel, storageOpt) +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) } // Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. 
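Init above folds vfs.imagestore options into the driver's list of homes. The parsing amounts to the following standalone sketch (the helper name is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// additionalHomes extracts the extra store paths from options such as
// "vfs.imagestore=/ro/a,/ro/b", mirroring the prefix handling in Init.
func additionalHomes(options []string) []string {
	var homes []string
	for _, option := range options {
		for _, prefix := range []string{"vfs.imagestore=", ".imagestore="} {
			if strings.HasPrefix(option, prefix) {
				homes = append(homes, strings.Split(strings.TrimPrefix(option, prefix), ",")...)
			}
		}
	}
	return homes
}

func main() {
	fmt.Println(additionalHomes([]string{"vfs.imagestore=/ro/a,/ro/b"}))
}
```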
-func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { - if len(storageOpt) != 0 { +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 { return fmt.Errorf("--storage-opt is not supported for vfs") } dir := d.dir(id) - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { return err } - if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil { return err } - if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { - return err - } - opts := []string{"level:s0"} - if _, mountLabel, err := label.InitLabels(opts); err == nil { + labelOpts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { label.SetFileLabel(dir, mountLabel) } if parent == "" { @@ -102,22 +105,26 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str if err != nil { return fmt.Errorf("%s: %s", parent, err) } - if err := CopyWithTar(parentDir, dir); err != nil { - return err - } - return nil + return CopyWithTar(parentDir, dir) } func (d *Driver) dir(id string) string { - return filepath.Join(d.home, "dir", filepath.Base(id)) + for i, home := range d.homes { + if i > 0 { + home = filepath.Join(home, d.String()) + } + candidate := filepath.Join(home, "dir", filepath.Base(id)) + fi, err := os.Stat(candidate) + if err == nil && fi.IsDir() { + return candidate + } + } + return filepath.Join(d.homes[0], "dir", filepath.Base(id)) } // Remove deletes the content from the directory for a given id. func (d *Driver) Remove(id string) error { - if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { - return err - } - return nil + return system.EnsureRemoveAll(d.dir(id)) } // Get returns the directory for the given id. @@ -146,6 +153,8 @@ func (d *Driver) Exists(id string) bool { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - var imageStores []string - return imageStores + if len(d.homes) > 1 { + return d.homes[1:] + } + return nil } diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index 7ab36513..abe2ac43 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -6,6 +6,7 @@ import ( "bufio" "bytes" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -16,6 +17,7 @@ import ( "strings" "sync" "syscall" + "time" "unsafe" "github.com/Microsoft/go-winio" @@ -29,17 +31,37 @@ import ( "github.com/containers/storage/pkg/longpath" "github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/system" + units "github.com/docker/go-units" "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" + "golang.org/x/sys/windows" ) // filterDriver is an HCSShim driver type for the Windows Filter driver. const filterDriver = 1 +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. 
+ mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } + noreexec = false +) + // init registers the windows graph drivers to the register. func init() { graphdriver.Register("windowsfilter", InitFilter) - reexec.Register("storage-windows-write-layer", writeLayer) + // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes + // debugging issues in the re-exec codepath significantly easier. + if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { + logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") + noreexec = true + } else { + reexec.Register("docker-windows-write-layer", writeLayerReexec) + } } type checker struct { @@ -60,13 +82,22 @@ type Driver struct { cache map[string]string } -func isTP5OrOlder() bool { - return system.GetOSVersion().Build <= 14300 -} - // InitFilter returns a new Windows storage filter driver. func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + + fsType, err := getFileSystemType(string(home[0])) + if err != nil { + return nil, err + } + if strings.ToLower(fsType) == "refs" { + return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) + } + + if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { + return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) + } + d := &Driver{ info: hcsshim.DriverInfo{ HomeDir: home, @@ -78,6 +109,37 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) return d, nil } +// win32FromHresult is a helper function to get the win32 error code from an HRESULT +func win32FromHresult(hr uintptr) uintptr { + if hr&0x1fff0000 == 0x00070000 { + return hr & 0xffff + } + return hr +} + +// getFileSystemType obtains the type of a file system through GetVolumeInformation +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx +func getFileSystemType(drive string) (fsType string, hr error) { + var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") + buf = make([]uint16, 255) + size = windows.MAX_PATH + 1 + ) + if len(drive) != 1 { + hr = errors.New("getFileSystemType must be called with a drive letter") + return + } + drive += `:\` + n := uintptr(unsafe.Pointer(nil)) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + fsType = windows.UTF16ToString(buf) + return +} + // String returns the string representation of a driver. This should match // the name the graph driver has been registered with. func (d *Driver) String() string { @@ -91,8 +153,19 @@ func (d *Driver) Status() [][2]string { } } +// panicIfUsedByLcow does exactly what it says. +// TODO @jhowardmsft - this is a temporary measure for the bring-up of +// Linux containers on Windows. It is a failsafe to ensure that the right +// graphdriver is used. 
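InitFilter uses getFileSystemType to refuse ReFS-backed homes up front. A hypothetical caller, assuming it compiles in the same Windows-only package as the helper above (imports fmt and strings):

```go
// checkHomeVolume is a hypothetical wrapper around the getFileSystemType
// helper defined in the diff above; home is a path such as `C:\layers`.
func checkHomeVolume(home string) error {
	fsType, err := getFileSystemType(string(home[0]))
	if err != nil {
		return err
	}
	if strings.EqualFold(fsType, "refs") {
		return fmt.Errorf("%s is on a ReFS volume - ReFS volumes are not supported", home)
	}
	return nil
}
```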
+func panicIfUsedByLcow() { + if system.LCOWSupported() { + panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode") + } +} + // Exists returns true if the given id is registered with this driver. func (d *Driver) Exists(id string) bool { + panicIfUsedByLcow() rID, err := d.resolveID(id) if err != nil { return false @@ -106,20 +179,24 @@ func (d *Driver) Exists(id string) bool { // CreateReadWrite creates a layer that is writable for use as a container // file system. -func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.create(id, parent, mountLabel, false, storageOpt) +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) + } + return d.create(id, parent, "", false, nil) } // Create creates a new read-only layer with the given id. -func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.create(id, parent, mountLabel, true, storageOpt) +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) + } + return d.create(id, parent, "", true, nil) } func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { - if len(storageOpt) != 0 { - return fmt.Errorf("--storage-opt is not supported for windows") - } - rPId, err := d.resolveID(parent) if err != nil { return err @@ -133,7 +210,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt var layerChain []string if rPId != "" { - parentPath, err := hcsshim.LayerMountPath(d.info, rPId) + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) if err != nil { return err } @@ -156,32 +233,20 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt parentPath = layerChain[0] } - if isTP5OrOlder() { - // Pre-create the layer directory, providing an ACL to give the Hyper-V Virtual Machines - // group access. This is necessary to ensure that Hyper-V containers can access the - // virtual machine data. This is not necessary post-TP5. - path, err := syscall.UTF16FromString(filepath.Join(d.info.HomeDir, id)) - if err != nil { - return err - } - // Give system and administrators full control, and VMs read, write, and execute. - // Mark these ACEs as inherited. - sd, err := winio.SddlToSecurityDescriptor("D:(A;OICI;FA;;;SY)(A;OICI;FA;;;BA)(A;OICI;FRFWFX;;;S-1-5-83-0)") - if err != nil { - return err - } - err = syscall.CreateDirectory(&path[0], &syscall.SecurityAttributes{ - Length: uint32(unsafe.Sizeof(syscall.SecurityAttributes{})), - SecurityDescriptor: uintptr(unsafe.Pointer(&sd[0])), - }) - if err != nil { - return err - } - } - if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { return err } + + storageOptions, err := parseStorageOpt(storageOpt) + if err != nil { + return fmt.Errorf("Failed to parse storage options - %s", err) + } + + if storageOptions.size != 0 { + if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + return err + } + } } if _, err := os.Lstat(d.dir(parent)); err != nil { @@ -208,16 +273,89 @@ func (d *Driver) dir(id string) string { // Remove unmounts and removes the dir information. 
func (d *Driver) Remove(id string) error { + panicIfUsedByLcow() rID, err := d.resolveID(id) if err != nil { return err } - os.RemoveAll(filepath.Join(d.info.HomeDir, "sysfile-backups", rID)) // ok to fail - return hcsshim.DestroyLayer(d.info, rID) + + // This retry loop is due to a bug in Windows (Internal bug #9432268) + // if GetContainers fails with ErrVmcomputeOperationInvalidState + // it is a transient error. Retry until it succeeds. + var computeSystems []hcsshim.ContainerProperties + retryCount := 0 + osv := system.GetOSVersion() + for { + // Get and terminate any template VMs that are currently using the layer. + // Note: It is unfortunate that we end up in the graphdrivers Remove() call + // for both containers and images, but the logic for template VMs is only + // needed for images - specifically we are looking to see if a base layer + // is in use by a template VM as a result of having started a Hyper-V + // container at some point. + // + // We have a retry loop for ErrVmcomputeOperationInvalidState and + // ErrVmcomputeOperationAccessIsDenied as there is a race condition + // in RS1 and RS2 building during enumeration when a silo is going away + // for example under it, in HCS. AccessIsDenied added to fix 30278. + // + // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider + // using platform APIs (if available) to get this more succinctly. Also + // consider enhancing the Remove() interface to have context of why + // the remove is being called - that could improve efficiency by not + // enumerating compute systems during a remove of a container as it's + // not required. + computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) + if err != nil { + if (osv.Build < 15139) && + ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { + if retryCount >= 500 { + break + } + retryCount++ + time.Sleep(10 * time.Millisecond) + continue + } + return err + } + break + } + + for _, computeSystem := range computeSystems { + if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { + container, err := hcsshim.OpenContainer(computeSystem.ID) + if err != nil { + return err + } + defer container.Close() + err = container.Terminate() + if hcsshim.IsPending(err) { + err = container.Wait() + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + return err + } + } + } + + layerPath := filepath.Join(d.info.HomeDir, rID) + tmpID := fmt.Sprintf("%s-removing", rID) + tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { + logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) + } + + return nil } -// Get returns the rootfs path for the id. This will mount the dir at it's given path. +// Get returns the rootfs path for the id. This will mount the dir at its given path. 
func (d *Driver) Get(id, mountLabel string) (string, error) {
+	panicIfUsedByLcow()
 	logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
 	var dir string

@@ -248,9 +386,12 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 		return "", err
 	}

-	mountPath, err := hcsshim.LayerMountPath(d.info, rID)
+	mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
 	if err != nil {
 		d.ctr.Decrement(rID)
+		if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+			logrus.Warnf("Failed to Unprepare %s: %s", id, err)
+		}
 		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
 			logrus.Warnf("Failed to Deactivate %s: %s", id, err2)
 		}
@@ -273,6 +414,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {

// Put unmounts the layer with the given id, releasing one reference to it.
func (d *Driver) Put(id string) error {
+	panicIfUsedByLcow()
 	logrus.Debugf("WindowsGraphDriver Put() id %s", id)

 	rID, err := d.resolveID(id)
@@ -283,9 +425,15 @@ func (d *Driver) Put(id string) error {
 		return nil
 	}
 	d.cacheMu.Lock()
+	_, exists := d.cache[rID]
 	delete(d.cache, rID)
 	d.cacheMu.Unlock()

+	// If the cache was not populated, then the layer was left unprepared and deactivated.
+	if !exists {
+		return nil
+	}
+
 	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
 		return err
 	}
@@ -293,14 +441,39 @@ func (d *Driver) Put(id string) error {
 }

// Cleanup ensures the information the driver stores is properly removed.
+// We use this opportunity to clean up any -removing folders which may still
+// be left if the daemon was killed while it was removing a layer.
func (d *Driver) Cleanup() error {
+	items, err := ioutil.ReadDir(d.info.HomeDir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	// Note we don't return an error below - it's possible the files
+	// are locked. However, next time around after the daemon exits,
+	// we will likely be able to clean up successfully. Instead we log
+	// warnings if there are errors.
+	for _, item := range items {
+		if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
+			if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil {
+				logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err)
+			} else {
+				logrus.Infof("Cleaned up %s", item.Name())
+			}
+		}
+	}
+
 	return nil
}

// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
// The layer should be mounted when calling this function
-func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
+func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
+	panicIfUsedByLcow()
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return
 	}
@@ -335,8 +508,9 @@ func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {

// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
-// The layer should be mounted when calling this function
+// The layer should not be mounted when calling this function.
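On the Get/Put pair above: they are reference counted, so each successful Get must be balanced by a Put, and only at refcount zero does the driver unprepare and deactivate the layer. The new cache check in Put also means a layer that was never fully prepared is released without calling UnprepareLayer. A hypothetical helper, for illustration:

```go
package main

import (
	graphdriver "github.com/containers/storage/drivers"
	"github.com/sirupsen/logrus"
)

// withMountedLayer is a hypothetical wrapper showing the Get/Put pairing.
func withMountedLayer(d graphdriver.Driver, id string, fn func(dir string) error) error {
	dir, err := d.Get(id, "") // first reference prepares and activates the layer
	if err != nil {
		return err
	}
	defer func() {
		// Balances the Get; at refcount zero the layer is unprepared and
		// deactivated. If Get never populated the driver cache, Put now
		// returns early without calling UnprepareLayer.
		if err := d.Put(id); err != nil {
			logrus.Warnf("put %s: %v", id, err)
		}
	}()
	return fn(dir)
}
```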
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + panicIfUsedByLcow() rID, err := d.resolveID(id) if err != nil { return nil, err @@ -346,13 +520,12 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return nil, err } - // this is assuming that the layer is unmounted - if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { return nil, err } defer func() { - if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil { - logrus.Warnf("Failed to Deactivate %s: %s", rID, err) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) } }() @@ -392,7 +565,8 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { // layer with the specified id and parent, returning the size of the // new layer in bytes. // The layer should not be mounted when calling this function -func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { +func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + panicIfUsedByLcow() var layerChain []string if parent != "" { rPId, err := d.resolveID(parent) @@ -403,7 +577,7 @@ func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error if err != nil { return 0, err } - parentPath, err := hcsshim.LayerMountPath(d.info, rPId) + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) if err != nil { return 0, err } @@ -427,6 +601,7 @@ func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + panicIfUsedByLcow() rPId, err := d.resolveID(parent) if err != nil { return @@ -448,6 +623,7 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) { // Metadata returns custom driver information. func (d *Driver) Metadata(id string) (map[string]string, error) { + panicIfUsedByLcow() m := make(map[string]string) m["dir"] = d.dir(id) return m, nil @@ -483,7 +659,7 @@ func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { } // exportLayer generates an archive from a layer based on the given ID. -func (d *Driver) exportLayer(id string, parentLayerPaths []string) (archive.Archive, error) { +func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { archive, w := io.Pipe() go func() { err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { @@ -505,7 +681,48 @@ func (d *Driver) exportLayer(id string, parentLayerPaths []string) (archive.Arch return archive, nil } -func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) { +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. 
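The helper documented above (and defined next) leans on the package-level `mutatedFiles` map, which is defined elsewhere in this file and not part of this hunk: it maps tar entry names of files that the import process rewrites (the Hyper-V BCD store, for instance) to backup file names inside the layer directory. An illustrative shape only, not the verbatim table:

```go
// Illustrative only - keys are tar entry names of files mutated during
// import; values are backup file names stored in the layer directory.
var mutatedFiles = map[string]string{
	"UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak",
}
```

`DiffGetter` (further down) consults the same map, so later reads see the pristine backup rather than the mutated on-disk copy.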
+func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { t := tar.NewReader(r) hdr, err := t.Next() totalSize := int64(0) @@ -539,30 +756,7 @@ func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) { if err != nil { return 0, err } - buf.Reset(w) - - // Add the Hyper-V Virtual Machine group ACE to the security descriptor - // for TP5 so that Xenons can access all files. This is not necessary - // for post-TP5 builds. - if isTP5OrOlder() { - if sddl, ok := hdr.Winheaders["sd"]; ok { - var ace string - if hdr.Typeflag == tar.TypeDir { - ace = "(A;OICI;0x1200a9;;;S-1-5-83-0)" - } else { - ace = "(A;;0x1200a9;;;S-1-5-83-0)" - } - if hdr.Winheaders["sd"], ok = addAceToSddlDacl(sddl, ace); !ok { - logrus.Debugf("failed to add VM ACE to %s", sddl) - } - } - } - - hdr, err = backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) - ferr := buf.Flush() - if ferr != nil { - err = ferr - } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) totalSize += size } } @@ -572,105 +766,75 @@ func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) { return totalSize, nil } -func addAceToSddlDacl(sddl, ace string) (string, bool) { - daclStart := strings.Index(sddl, "D:") - if daclStart < 0 { - return sddl, false - } - - dacl := sddl[daclStart:] - daclEnd := strings.Index(dacl, "S:") - if daclEnd < 0 { - daclEnd = len(dacl) - } - dacl = dacl[:daclEnd] - - if strings.Contains(dacl, ace) { - return sddl, true - } - - i := 2 - for i+1 < len(dacl) { - if dacl[i] != '(' { - return sddl, false - } - - if dacl[i+1] == 'A' { - break - } - - i += 2 - for p := 1; i < len(dacl) && p > 0; i++ { - if dacl[i] == '(' { - p++ - } else if dacl[i] == ')' { - p-- - } - } - } - - return sddl[:daclStart+i] + ace + sddl[daclStart+i:], true -} - // importLayer adds a new layer to the tag and graph store based on the given data. -func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) { - cmd := reexec.Command(append([]string{"storage-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) - output := bytes.NewBuffer(nil) - cmd.Stdin = layerData - cmd.Stdout = output - cmd.Stderr = output +func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { + if !noreexec { + cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) 
+		output := bytes.NewBuffer(nil)
+		cmd.Stdin = layerData
+		cmd.Stdout = output
+		cmd.Stderr = output

-	if err = cmd.Start(); err != nil {
-		return
+		if err = cmd.Start(); err != nil {
+			return
+		}
+
+		if err = cmd.Wait(); err != nil {
+			return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
+		}
+
+		return strconv.ParseInt(output.String(), 10, 64)
 	}
-
-	if err = cmd.Wait(); err != nil {
-		return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
-	}
-
-	return strconv.ParseInt(output.String(), 10, 64)
+	return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...)
}

-// writeLayer is the re-exec entry point for writing a layer from a tar file
-func writeLayer() {
-	home := os.Args[1]
-	id := os.Args[2]
-	parentLayerPaths := os.Args[3:]
-
-	err := func() error {
-		err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
-		if err != nil {
-			return err
-		}
-
-		info := hcsshim.DriverInfo{
-			Flavour: filterDriver,
-			HomeDir: home,
-		}
-
-		w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
-		if err != nil {
-			return err
-		}
-
-		size, err := writeLayerFromTar(os.Stdin, w)
-		if err != nil {
-			return err
-		}
-
-		err = w.Close()
-		if err != nil {
-			return err
-		}
-
-		fmt.Fprint(os.Stdout, size)
-		return nil
-	}()
-
+// writeLayerReexec is the re-exec entry point for writing a layer from a tar file
+func writeLayerReexec() {
+	size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...)
 	if err != nil {
 		fmt.Fprint(os.Stderr, err)
 		os.Exit(1)
 	}
+	fmt.Fprint(os.Stdout, size)
+}
+
+// writeLayer writes a layer from a tar file.
+func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) {
+	err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
+	if err != nil {
+		return 0, err
+	}
+	if noreexec {
+		defer func() {
+			if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil {
+				// This should never happen, but guard against it when in debugging mode.
+				// See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
+				panic("Failed to disable process privileges while in non re-exec mode")
+			}
+		}()
+	}
+
+	info := hcsshim.DriverInfo{
+		Flavour: filterDriver,
+		HomeDir: home,
+	}
+
+	w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
+	if err != nil {
+		return 0, err
+	}
+
+	size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id))
+	if err != nil {
+		return 0, err
+	}
+
+	err = w.Close()
+	if err != nil {
+		return 0, err
+	}
+
+	return size, nil
}

// resolveID computes the layerID information based on the given id.
@@ -686,11 +850,7 @@ func (d *Driver) resolveID(id string) (string, error) {

// setID stores the layerId on disk.
func (d *Driver) setID(id, altID string) error {
-	err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
-	if err != nil {
-		return err
-	}
-	return nil
+	return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
}

// getLayerChain returns the layer chain information.
@@ -733,17 +893,23 @@ type fileGetCloserWithBackupPrivileges struct {
}

func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
+	if backupPath, ok := mutatedFiles[filename]; ok {
+		return os.Open(filepath.Join(fg.path, backupPath))
+	}
+
	var f *os.File
	// Open the file while holding the Windows backup privilege.
This ensures that the // file can be opened even if the caller does not actually have access to it according - // to the security descriptor. + // to the security descriptor. Also use sequential file access to avoid depleting the + // standby list - Microsoft VSO Bug Tracker #9900466 err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { path := longpath.AddPrefix(filepath.Join(fg.path, filename)) - p, err := syscall.UTF16FromString(path) + p, err := windows.UTF16FromString(path) if err != nil { return err } - h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0) if err != nil { return &os.PathError{Op: "open", Path: path, Err: err} } @@ -757,19 +923,10 @@ func (fg *fileGetCloserWithBackupPrivileges) Close() error { return nil } -type fileGetDestroyCloser struct { - storage.FileGetter - path string -} - -func (f *fileGetDestroyCloser) Close() error { - // TODO: activate layers and release here? - return os.RemoveAll(f.path) -} - // DiffGetter returns a FileGetCloser that can read files from the directory that // contains files for the layer differences. Used for direct access for tar-split. func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + panicIfUsedByLcow() id, err := d.resolveID(id) if err != nil { return nil, err @@ -777,3 +934,32 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil } + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return nil +} + +type storageOptions struct { + size uint64 +} + +func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { + options := storageOptions{} + + // Read size to change the block device size per container. 
+	for key, val := range storageOpt {
+		key := strings.ToLower(key)
+		switch key {
+		case "size":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			options.size = uint64(size)
+		default:
+			return nil, fmt.Errorf("Unknown storage option: %s", key)
+		}
+	}
+	return &options, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index c9860ec2..8c8e7d67 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -10,7 +10,6 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"syscall"
 	"time"

 	"github.com/containers/storage/drivers"
@@ -21,6 +20,7 @@ import (
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
)

type zfsOptions struct {
@@ -100,6 +100,14 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
 		return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
 	}

+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get root uid/gid: %v", err)
+	}
+	if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil {
+		return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
+	}
+
 	if err := mount.MakePrivate(base); err != nil {
 		return nil, err
 	}
@@ -134,8 +142,8 @@ func parseOptions(opt []string) (zfsOptions, error) {
}

func lookupZfsDataset(rootdir string) (string, error) {
-	var stat syscall.Stat_t
-	if err := syscall.Stat(rootdir, &stat); err != nil {
+	var stat unix.Stat_t
+	if err := unix.Stat(rootdir, &stat); err != nil {
 		return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
 	}
 	wantedDev := stat.Dev
@@ -145,7 +153,7 @@ func lookupZfsDataset(rootdir string) (string, error) {
 		return "", err
 	}
 	for _, m := range mounts {
-		if err := syscall.Stat(m.Mountpoint, &stat); err != nil {
+		if err := unix.Stat(m.Mountpoint, &stat); err != nil {
 			logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
 			continue // may fail on fuse file systems
 		}
@@ -213,7 +221,10 @@ func (d *Driver) Status() [][2]string {

// Metadata returns image/container metadata related to graph driver
func (d *Driver) Metadata(id string) (map[string]string, error) {
-	return nil, nil
+	return map[string]string{
+		"Mountpoint": d.mountPath(id),
+		"Dataset":    d.zfsPath(id),
+	}, nil
}

func (d *Driver) cloneFilesystem(name, parentName string) error {
@@ -248,12 +259,17 @@ func (d *Driver) mountPath(id string) string {

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
-func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
-	return d.Create(id, parent, mountLabel, storageOpt)
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	return d.Create(id, parent, opts)
}

// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent.
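One detail of the option parsing above that is easy to miss: `units.RAMInBytes` (from the vendored docker/go-units package) treats size suffixes as binary multiples, so "20GB" means 20×1024³ bytes, not 20×10⁹. The `uint64(size)` conversion is safe because a successfully parsed string never yields a negative value. A quick illustration:

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

func main() {
	// RAMInBytes uses binary multiples: "20GB" -> 21474836480 bytes.
	for _, s := range []string{"512MB", "20GB", "1g"} {
		n, err := units.RAMInBytes(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Printf("%s -> %d bytes\n", s, n)
	}
}
```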
-func (d *Driver) Create(id string, parent string, mountLabel string, storageOpt map[string]string) error { +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + err := d.create(id, parent, storageOpt) if err == nil { return nil @@ -391,22 +407,20 @@ func (d *Driver) Put(id string) error { logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) - err = mount.Unmount(mountpoint) - if err != nil { + if err := mount.Unmount(mountpoint); err != nil { return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) } - return err + return nil } // Exists checks to see if the cache entry exists for the given id. func (d *Driver) Exists(id string) bool { d.Lock() defer d.Unlock() - return d.filesystemsCache[d.zfsPath(id)] == true + return d.filesystemsCache[d.zfsPath(id)] } // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - var imageStores []string - return imageStores + return nil } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go index ade71b15..69c0448d 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -3,16 +3,16 @@ package zfs import ( "fmt" "strings" - "syscall" "github.com/containers/storage/drivers" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func checkRootdirFs(rootdir string) error { - var buf syscall.Statfs_t - if err := syscall.Statfs(rootdir, &buf); err != nil { + var buf unix.Statfs_t + if err := unix.Statfs(rootdir, &buf); err != nil { return fmt.Errorf("Failed to access '%s': %s", rootdir, err) } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go index 92b38756..da298047 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -2,16 +2,16 @@ package zfs import ( "fmt" - "syscall" "github.com/containers/storage/drivers" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func checkRootdirFs(rootdir string) error { - var buf syscall.Statfs_t - if err := syscall.Statfs(rootdir, &buf); err != nil { + var buf unix.Statfs_t + if err := unix.Statfs(rootdir, &buf); err != nil { return fmt.Errorf("Failed to access '%s': %s", rootdir, err) } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go index ca595638..2383bf3b 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go @@ -22,24 +22,23 @@ import ( "github.com/containers/storage/drivers" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus" ) func checkRootdirFs(rootdir string) error { cs := C.CString(filepath.Dir(rootdir)) + defer C.free(unsafe.Pointer(cs)) buf := C.getstatfs(cs) + defer C.free(unsafe.Pointer(buf)) // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... 
] if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || (buf.f_basetype[3] != 0) { - log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - C.free(unsafe.Pointer(buf)) + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) } - C.free(unsafe.Pointer(buf)) - C.free(unsafe.Pointer(cs)) return nil } diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go new file mode 100644 index 00000000..bed6f8cd --- /dev/null +++ b/vendor/github.com/containers/storage/errors.go @@ -0,0 +1,56 @@ +package storage + +import ( + "errors" +) + +var ( + // ErrContainerUnknown indicates that there was no container with the specified name or ID. + ErrContainerUnknown = errors.New("container not known") + // ErrImageUnknown indicates that there was no image with the specified name or ID. + ErrImageUnknown = errors.New("image not known") + // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer. + ErrParentUnknown = errors.New("parent of layer not known") + // ErrLayerUnknown indicates that there was no layer with the specified name or ID. + ErrLayerUnknown = errors.New("layer not known") + // ErrLoadError indicates that there was an initialization error. + ErrLoadError = errors.New("error loading storage metadata") + // ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used. + ErrDuplicateID = errors.New("that ID is already in use") + // ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used. + ErrDuplicateName = errors.New("that name is already in use") + // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer. + ErrParentIsContainer = errors.New("would-be parent layer is a container") + // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container. + ErrNotAContainer = errors.New("identifier is not a container") + // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image. + ErrNotAnImage = errors.New("identifier is not an image") + // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer. + ErrNotALayer = errors.New("identifier is not a layer") + // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist. + ErrNotAnID = errors.New("identifier is not a layer, image, or container") + // ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children. + ErrLayerHasChildren = errors.New("layer has children") + // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer. + ErrLayerUsedByImage = errors.New("layer is in use by an image") + // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer. + ErrLayerUsedByContainer = errors.New("layer is in use by a container") + // ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image. + ErrImageUsedByContainer = errors.New("image is in use by a container") + // ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information. 
+ ErrIncompleteOptions = errors.New("missing necessary StoreOptions") + // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer. + ErrSizeUnknown = errors.New("size is not known") + // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents. + ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") + // ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write. + ErrLockReadOnly = errors.New("lock is not a read-write lock") + // ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images. + ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images") + // ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers. + ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers") + // ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty. + ErrInvalidBigDataName = errors.New("not a valid name for a big data item") + // ErrDigestUnknown indicates that we were unable to compute the digest of a specified item. + ErrDigestUnknown = errors.New("could not compute digest of item") +) diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index fe17f631..962e1bb7 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -10,12 +10,15 @@ import ( "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) -var ( - // ErrImageUnknown indicates that there was no image with the specified name or ID - ErrImageUnknown = errors.New("image not known") +const ( + // ImageDigestBigDataKey is the name of the big data item whose + // contents we consider useful for computing a "digest" of the + // image, by which we can locate the image later. + ImageDigestBigDataKey = "manifest" ) // An Image is a reference to a layer and an associated metadata string. @@ -24,14 +27,18 @@ type Image struct { // value which was generated by the library. ID string `json:"id"` + // Digest is a digest value that we can use to locate the image. + Digest digest.Digest `json:"digest,omitempty"` + // Names is an optional set of user-defined convenience values. The // image can be referred to by its ID or any of its names. Names are // unique among images. Names []string `json:"names,omitempty"` - // TopLayer is the ID of the topmost layer of the image itself. - // Multiple images can refer to the same top layer. - TopLayer string `json:"layer"` + // TopLayer is the ID of the topmost layer of the image itself, if the + // image contains one or more layers. Multiple images can refer to the + // same top layer. + TopLayer string `json:"layer,omitempty"` // Metadata is data we keep for the convenience of the caller. It is not // expected to be large, since it is kept in memory. @@ -46,6 +53,10 @@ type Image struct { // that has been stored, if they're known. BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + // BigDataDigests maps the names in BigDataNames to the digests of the + // data that has been stored, if they're known. 
+	BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
+
 	// Created is the datestamp for when this image was created. Older
 	// versions of the library did not track this information, so callers
 	// will likely want to use the IsZero() method to verify that a value
@@ -73,6 +84,10 @@ type Image struct {

 	// Images returns a slice enumerating the known images.
 	Images() ([]Image, error)
+
+	// ByDigest returns a slice enumerating the images which have a big data
+	// item with the name ImageDigestBigDataKey and the specified digest.
+	ByDigest(d digest.Digest) ([]*Image, error)
}

// ImageStore provides bookkeeping for information about Images.
@@ -86,7 +101,7 @@ type ImageStore interface {
 	// Create creates an image that has a specified ID (or a random one) and
 	// optional names, using the specified layer as its topmost (hopefully
 	// read-only) layer. That layer can be referenced by multiple images.
-	Create(id string, names []string, layer, metadata string, created time.Time) (*Image, error)
+	Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)

 	// SetNames replaces the list of names associated with an image with the
 	// supplied values.
@@ -106,6 +121,7 @@ type imageStore struct {
 	idindex  *truncindex.TruncIndex
 	byid     map[string]*Image
 	byname   map[string]*Image
+	bydigest map[digest.Digest][]*Image
}

func (r *imageStore) Images() ([]Image, error) {
@@ -139,7 +155,9 @@ func (r *imageStore) Load() error {
 	idlist := []string{}
 	ids := make(map[string]*Image)
 	names := make(map[string]*Image)
+	digests := make(map[digest.Digest][]*Image)
 	if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
+		idlist = make([]string, 0, len(images))
 		for n, image := range images {
 			ids[image.ID] = images[n]
 			idlist = append(idlist, image.ID)
@@ -150,15 +168,26 @@
 			}
 			names[name] = images[n]
 		}
+			// Implicit digest
+			if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
+				digests[digest] = append(digests[digest], images[n])
+			}
+			// Explicit digest
+			if image.Digest == "" {
+				image.Digest = image.BigDataDigests[ImageDigestBigDataKey]
+			} else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] {
+				digests[image.Digest] = append(digests[image.Digest], images[n])
+			}
 		}
 	}
 	if shouldSave && !r.IsReadWrite() {
-		return errors.New("image store assigns the same name to multiple images")
+		return ErrDuplicateImageNames
 	}
 	r.images = images
 	r.idindex = truncindex.NewTruncIndex(idlist)
 	r.byid = ids
 	r.byname = names
+	r.bydigest = digests
 	if shouldSave {
 		return r.Save()
 	}
@@ -197,6 +226,7 @@ func newImageStore(dir string) (ImageStore, error) {
 		images:   []*Image{},
 		byid:     make(map[string]*Image),
 		byname:   make(map[string]*Image),
+		bydigest: make(map[digest.Digest][]*Image),
 	}
 	if err := istore.Load(); err != nil {
 		return nil, err
@@ -217,6 +247,7 @@ func newROImageStore(dir string) (ROImageStore, error) {
 		images:   []*Image{},
 		byid:     make(map[string]*Image),
 		byname:   make(map[string]*Image),
+		bydigest: make(map[digest.Digest][]*Image),
 	}
 	if err := istore.Load(); err != nil {
 		return nil, err
@@ -256,11 +287,14 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
 	if !ok {
 		return ErrImageUnknown
 	}
+	if image.Flags == nil {
+		image.Flags = make(map[string]interface{})
+	}
 	image.Flags[flag] = value
 	return r.Save()
}

-func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time) (image *Image, err error) {
+func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) { if !r.IsReadWrite() { return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath()) } @@ -275,6 +309,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c if _, idInUse := r.byid[id]; idInUse { return nil, ErrDuplicateID } + names = dedupeNames(names) for _, name := range names { if _, nameInUse := r.byname[name]; nameInUse { return nil, ErrDuplicateName @@ -285,18 +320,24 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c } if err == nil { image = &Image{ - ID: id, - Names: names, - TopLayer: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - Created: created, - Flags: make(map[string]interface{}), + ID: id, + Digest: searchableDigest, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: created, + Flags: make(map[string]interface{}), } r.images = append(r.images, image) r.idindex.Add(id) r.byid[id] = image + if searchableDigest != "" { + list := r.bydigest[searchableDigest] + r.bydigest[searchableDigest] = append(list, image) + } for _, name := range names { r.byname[name] = image } @@ -331,6 +372,7 @@ func (r *imageStore) SetNames(id string, names []string) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) } + names = dedupeNames(names) if image, ok := r.lookup(id); ok { for _, name := range image.Names { delete(r.byname, name) @@ -375,6 +417,28 @@ func (r *imageStore) Delete(id string) error { r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) 
} } + if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok { + // remove the image from the digest-based index + if list, ok := r.bydigest[digest]; ok { + prunedList := imageSliceWithoutValue(list, image) + if len(prunedList) == 0 { + delete(r.bydigest, digest) + } else { + r.bydigest[digest] = prunedList + } + } + } + if image.Digest != "" { + // remove the image's hard-coded digest from the digest-based index + if list, ok := r.bydigest[image.Digest]; ok { + prunedList := imageSliceWithoutValue(list, image) + if len(prunedList) == 0 { + delete(r.bydigest, image.Digest) + } else { + r.bydigest[image.Digest] = prunedList + } + } + } if err := r.Save(); err != nil { return err } @@ -403,7 +467,17 @@ func (r *imageStore) Exists(id string) bool { return ok } +func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { + if images, ok := r.bydigest[d]; ok { + return images, nil + } + return nil, ErrImageUnknown +} + func (r *imageStore) BigData(id, key string) ([]byte, error) { + if key == "" { + return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name") + } image, ok := r.lookup(id) if !ok { return nil, ErrImageUnknown @@ -412,16 +486,61 @@ func (r *imageStore) BigData(id, key string) ([]byte, error) { } func (r *imageStore) BigDataSize(id, key string) (int64, error) { + if key == "" { + return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name") + } image, ok := r.lookup(id) if !ok { return -1, ErrImageUnknown } + if image.BigDataSizes == nil { + image.BigDataSizes = make(map[string]int64) + } if size, ok := image.BigDataSizes[key]; ok { return size, nil } + if data, err := r.BigData(id, key); err == nil && data != nil { + if r.SetBigData(id, key, data) == nil { + image, ok := r.lookup(id) + if !ok { + return -1, ErrImageUnknown + } + if size, ok := image.BigDataSizes[key]; ok { + return size, nil + } + } + } return -1, ErrSizeUnknown } +func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { + if key == "" { + return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name") + } + image, ok := r.lookup(id) + if !ok { + return "", ErrImageUnknown + } + if image.BigDataDigests == nil { + image.BigDataDigests = make(map[string]digest.Digest) + } + if d, ok := image.BigDataDigests[key]; ok { + return d, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + if r.SetBigData(id, key, data) == nil { + image, ok := r.lookup(id) + if !ok { + return "", ErrImageUnknown + } + if d, ok := image.BigDataDigests[key]; ok { + return d, nil + } + } + } + return "", ErrDigestUnknown +} + func (r *imageStore) BigDataNames(id string) ([]string, error) { image, ok := r.lookup(id) if !ok { @@ -430,7 +549,21 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) { return image.BigDataNames, nil } +func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { + modified := make([]*Image, 0, len(slice)) + for _, v := range slice { + if v == value { + continue + } + modified = append(modified, v) + } + return modified +} + func (r *imageStore) SetBigData(id, key string, data []byte) error { + if key == "" { + return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item") + } if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath()) } @@ -443,23 +576,55 @@ func (r *imageStore) SetBigData(id, key 
string, data []byte) error { } err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600) if err == nil { - add := true save := false - oldSize, ok := image.BigDataSizes[key] + if image.BigDataSizes == nil { + image.BigDataSizes = make(map[string]int64) + } + oldSize, sizeOk := image.BigDataSizes[key] image.BigDataSizes[key] = int64(len(data)) - if !ok || oldSize != image.BigDataSizes[key] { + if image.BigDataDigests == nil { + image.BigDataDigests = make(map[string]digest.Digest) + } + oldDigest, digestOk := image.BigDataDigests[key] + newDigest := digest.Canonical.FromBytes(data) + image.BigDataDigests[key] = newDigest + if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest { save = true } + addName := true for _, name := range image.BigDataNames { if name == key { - add = false + addName = false break } } - if add { + if addName { image.BigDataNames = append(image.BigDataNames, key) save = true } + if key == ImageDigestBigDataKey { + if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest { + // remove the image from the list of images in the digest-based + // index which corresponds to the old digest for this item, unless + // it's also the hard-coded digest + if list, ok := r.bydigest[oldDigest]; ok { + prunedList := imageSliceWithoutValue(list, image) + if len(prunedList) == 0 { + delete(r.bydigest, oldDigest) + } else { + r.bydigest[oldDigest] = prunedList + } + } + } + // add the image to the list of images in the digest-based index which + // corresponds to the new digest for this item, unless it's already there + list := r.bydigest[newDigest] + if len(list) == len(imageSliceWithoutValue(list, image)) { + // the list isn't shortened by trying to prune this image from it, + // so it's not in there yet + r.bydigest[newDigest] = append(list, image) + } + } if save { err = r.Save() } @@ -471,7 +636,7 @@ func (r *imageStore) Wipe() error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) } - ids := []string{} + ids := make([]string, 0, len(r.byid)) for id := range r.byid { ids = append(ids, id) } diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go new file mode 100644 index 00000000..f6a8b065 --- /dev/null +++ b/vendor/github.com/containers/storage/images_ffjson.go @@ -0,0 +1,1202 @@ +// Code generated by ffjson . DO NOT EDIT. 
+// source: images.go + +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/opencontainers/go-digest" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +// MarshalJSON marshal bytes to json - template +func (j *Image) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteByte(',') + if len(j.Digest) != 0 { + buf.WriteString(`"digest":`) + fflib.WriteJsonString(buf, string(j.Digest)) + buf.WriteByte(',') + } + if len(j.Names) != 0 { + buf.WriteString(`"names":`) + if j.Names != nil { + buf.WriteString(`[`) + for i, v := range j.Names { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.TopLayer) != 0 { + buf.WriteString(`"layer":`) + fflib.WriteJsonString(buf, string(j.TopLayer)) + buf.WriteByte(',') + } + if len(j.Metadata) != 0 { + buf.WriteString(`"metadata":`) + fflib.WriteJsonString(buf, string(j.Metadata)) + buf.WriteByte(',') + } + if len(j.BigDataNames) != 0 { + buf.WriteString(`"big-data-names":`) + if j.BigDataNames != nil { + buf.WriteString(`[`) + for i, v := range j.BigDataNames { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.BigDataSizes) != 0 { + if j.BigDataSizes == nil { + buf.WriteString(`"big-data-sizes":null`) + } else { + buf.WriteString(`"big-data-sizes":{ `) + for key, value := range j.BigDataSizes { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.FormatBits2(buf, uint64(value), 10, value < 0) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if len(j.BigDataDigests) != 0 { + if j.BigDataDigests == nil { + buf.WriteString(`"big-data-digests":null`) + } else { + buf.WriteString(`"big-data-digests":{ `) + for key, value := range j.BigDataDigests { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.WriteJsonString(buf, string(value)) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if true { + buf.WriteString(`"created":`) + + { + + obj, err = j.Created.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + if len(j.Flags) != 0 { + buf.WriteString(`"flags":`) + /* Falling back. 
type=map[string]interface {} kind=map */ + err = buf.Encode(j.Flags) + if err != nil { + return err + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffjtImagebase = iota + ffjtImagenosuchkey + + ffjtImageID + + ffjtImageDigest + + ffjtImageNames + + ffjtImageTopLayer + + ffjtImageMetadata + + ffjtImageBigDataNames + + ffjtImageBigDataSizes + + ffjtImageBigDataDigests + + ffjtImageCreated + + ffjtImageFlags +) + +var ffjKeyImageID = []byte("id") + +var ffjKeyImageDigest = []byte("digest") + +var ffjKeyImageNames = []byte("names") + +var ffjKeyImageTopLayer = []byte("layer") + +var ffjKeyImageMetadata = []byte("metadata") + +var ffjKeyImageBigDataNames = []byte("big-data-names") + +var ffjKeyImageBigDataSizes = []byte("big-data-sizes") + +var ffjKeyImageBigDataDigests = []byte("big-data-digests") + +var ffjKeyImageCreated = []byte("created") + +var ffjKeyImageFlags = []byte("flags") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *Image) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *Image) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtImagebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtImagenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'b': + + if bytes.Equal(ffjKeyImageBigDataNames, kn) { + currentKey = ffjtImageBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyImageBigDataSizes, kn) { + currentKey = ffjtImageBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyImageBigDataDigests, kn) { + currentKey = ffjtImageBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffjKeyImageCreated, kn) { + currentKey = ffjtImageCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffjKeyImageDigest, kn) { + currentKey = ffjtImageDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffjKeyImageFlags, kn) { + currentKey = ffjtImageFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeyImageID, kn) { + currentKey = ffjtImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffjKeyImageTopLayer, kn) { + currentKey = ffjtImageTopLayer + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffjKeyImageMetadata, kn) { + currentKey = ffjtImageMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffjKeyImageNames, kn) { + currentKey = ffjtImageNames + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyImageFlags, kn) { + currentKey = ffjtImageFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageCreated, kn) { + currentKey = ffjtImageCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageBigDataDigests, kn) { + currentKey = ffjtImageBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageBigDataSizes, kn) { + currentKey = ffjtImageBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageBigDataNames, kn) { + currentKey = ffjtImageBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageMetadata, kn) { + currentKey = ffjtImageMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageTopLayer, kn) { + currentKey = ffjtImageTopLayer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageNames, kn) { + currentKey = ffjtImageNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageDigest, kn) { + currentKey = ffjtImageDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageID, kn) { + currentKey = ffjtImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtImagenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == 
fflib.FFTok_null { + switch currentKey { + + case ffjtImageID: + goto handle_ID + + case ffjtImageDigest: + goto handle_Digest + + case ffjtImageNames: + goto handle_Names + + case ffjtImageTopLayer: + goto handle_TopLayer + + case ffjtImageMetadata: + goto handle_Metadata + + case ffjtImageBigDataNames: + goto handle_BigDataNames + + case ffjtImageBigDataSizes: + goto handle_BigDataSizes + + case ffjtImageBigDataDigests: + goto handle_BigDataDigests + + case ffjtImageCreated: + goto handle_Created + + case ffjtImageFlags: + goto handle_Flags + + case ffjtImagenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Digest: + + /* handler: j.Digest type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Digest = digest.Digest(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Names: + + /* handler: j.Names type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Names = nil + } else { + + j.Names = []string{} + + wantVal := true + + for { + + var tmpJNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJNames = string(string(outBuf)) + + } + } + + j.Names = append(j.Names, tmpJNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TopLayer: + + /* handler: j.TopLayer type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.TopLayer = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Metadata: + + /* handler: j.Metadata type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Metadata = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataNames: + + /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataNames = nil + } else { + + j.BigDataNames = []string{} + + wantVal := true + + for { + + var tmpJBigDataNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJBigDataNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataNames = string(string(outBuf)) + + } + } + + j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataSizes: + + /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataSizes = nil + } else { + + j.BigDataSizes = make(map[string]int64, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataSizes int64 + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + tmpJBigDataSizes = int64(tval) + + } + } + + j.BigDataSizes[k] = tmpJBigDataSizes + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataDigests: + + /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataDigests = nil + } else { + + j.BigDataDigests = make(map[string]digest.Digest, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataDigests digest.Digest + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataDigests = digest.Digest(string(outBuf)) + + } + } + + j.BigDataDigests[k] = tmpJBigDataDigests + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Created: + + /* handler: j.Created type=time.Time kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + } else { + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = j.Created.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Flags: + + /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Flags = nil + } else { + + j.Flags = make(map[string]interface{}, 0) + + wantVal := true + + for { + + var k string + + var tmpJFlags interface{} + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ + + { + /* Falling back. 
type=interface {} kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJFlags) + if err != nil { + return fs.WrapErr(err) + } + } + + j.Flags[k] = tmpJFlags + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *imageStore) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *imageStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtimageStorebase = iota + ffjtimageStorenosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *imageStore) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *imageStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtimageStorebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtimageStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtimageStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtimageStorenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 7cdc2e25..f51406a0 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -27,13 +27,6 @@ const ( compressionFlag = "diff-compression" ) -var ( - // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer - ErrParentUnknown = errors.New("parent of layer not known") - // ErrLayerUnknown indicates that there was no layer with the specified name or ID - ErrLayerUnknown = errors.New("layer not known") -) - // A Layer is a record of a copy-on-write layer that's stored by the lower // level graph driver. type Layer struct { @@ -191,7 +184,7 @@ type LayerStore interface { CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error) // Put combines the functions of CreateWithFlags and ApplyDiff. - Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (*Layer, int64, error) + Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) // SetNames replaces the list of names associated with a layer with the // supplied values. @@ -213,7 +206,7 @@ type LayerStore interface { // ApplyDiff reads a tarstream which was created by a previous call to Diff and // applies its changes to a specified layer. 
- ApplyDiff(to string, diff archive.Reader) (int64, error) + ApplyDiff(to string, diff io.Reader) (int64, error) } type layerStore struct { @@ -261,6 +254,7 @@ func (r *layerStore) Load() error { compressedsums := make(map[digest.Digest][]string) uncompressedsums := make(map[digest.Digest][]string) if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(layers)) for n, layer := range layers { ids[layer.ID] = layers[n] idlist = append(idlist, layer.ID) @@ -280,7 +274,7 @@ func (r *layerStore) Load() error { } } if shouldSave && !r.IsReadWrite() { - return errors.New("layer store assigns the same name to multiple layers") + return ErrDuplicateLayerNames } mpath := r.mountspath() data, err = ioutil.ReadFile(mpath) @@ -312,6 +306,9 @@ func (r *layerStore) Load() error { // actually delete. if r.IsReadWrite() { for _, layer := range r.layers { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } if cleanup, ok := layer.Flags[incompleteFlag]; ok { if b, ok := cleanup.(bool); ok && b { err = r.Delete(layer.ID) @@ -345,7 +342,7 @@ func (r *layerStore) Save() error { if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { return err } - mounts := []layerMountPoint{} + mounts := make([]layerMountPoint, 0, len(r.layers)) for _, layer := range r.layers { if layer.MountPoint != "" && layer.MountCount > 0 { mounts = append(mounts, layerMountPoint{ @@ -462,6 +459,9 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { if !ok { return ErrLayerUnknown } + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } layer.Flags[flag] = value return r.Save() } @@ -470,7 +470,7 @@ func (r *layerStore) Status() ([][2]string, error) { return r.driver.Status(), nil } -func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (layer *Layer, size int64, err error) { +func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) { if !r.IsReadWrite() { return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) } @@ -497,15 +497,20 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o if _, idInUse := r.byid[id]; idInUse { return nil, -1, ErrDuplicateID } + names = dedupeNames(names) for _, name := range names { if _, nameInUse := r.byname[name]; nameInUse { return nil, -1, ErrDuplicateName } } + opts := drivers.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: options, + } if writeable { - err = r.driver.CreateReadWrite(id, parent, mountLabel, options) + err = r.driver.CreateReadWrite(id, parent, &opts) } else { - err = r.driver.Create(id, parent, mountLabel, options) + err = r.driver.Create(id, parent, &opts) } if err == nil { layer = &Layer{ @@ -629,6 +634,7 @@ func (r *layerStore) SetNames(id string, names []string) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) } + names = dedupeNames(names) if layer, ok := r.lookup(id); ok { for _, name := range layer.Names { delete(r.byname, name) @@ -734,7 +740,7 @@ func (r *layerStore) Wipe() error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) } - ids := 
[]string{} + ids := make([]string, 0, len(r.byid)) for id := range r.byid { ids = append(ids, id) } @@ -907,7 +913,7 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) { return r.driver.DiffSize(to, from) } -func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err error) { +func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) { if !r.IsReadWrite() { return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath()) } diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go new file mode 100644 index 00000000..8bec40e1 --- /dev/null +++ b/vendor/github.com/containers/storage/layers_ffjson.go @@ -0,0 +1,1713 @@ +// Code generated by ffjson . DO NOT EDIT. +// source: layers.go + +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/containers/storage/pkg/archive" + "github.com/opencontainers/go-digest" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +// MarshalJSON marshal bytes to json - template +func (j *DiffOptions) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *DiffOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + if j.Compression != nil { + buf.WriteString(`{"Compression":`) + fflib.FormatBits2(buf, uint64(*j.Compression), 10, *j.Compression < 0) + } else { + buf.WriteString(`{"Compression":null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffjtDiffOptionsbase = iota + ffjtDiffOptionsnosuchkey + + ffjtDiffOptionsCompression +) + +var ffjKeyDiffOptionsCompression = []byte("Compression") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *DiffOptions) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *DiffOptions) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtDiffOptionsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtDiffOptionsnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'C': + + if bytes.Equal(ffjKeyDiffOptionsCompression, kn) { + currentKey = ffjtDiffOptionsCompression + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyDiffOptionsCompression, kn) { + currentKey = ffjtDiffOptionsCompression + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtDiffOptionsnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtDiffOptionsCompression: + goto handle_Compression + + case ffjtDiffOptionsnosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Compression: + + /* handler: j.Compression type=archive.Compression kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + j.Compression = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := archive.Compression(tval) + j.Compression = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *Layer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *Layer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteByte(',') + if len(j.Names) != 0 { + buf.WriteString(`"names":`) + if j.Names != nil { + buf.WriteString(`[`) + for i, v := range j.Names { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.Parent) != 0 { + buf.WriteString(`"parent":`) + fflib.WriteJsonString(buf, string(j.Parent)) + buf.WriteByte(',') + } + if len(j.Metadata) != 0 { + buf.WriteString(`"metadata":`) + fflib.WriteJsonString(buf, string(j.Metadata)) + buf.WriteByte(',') + } + if len(j.MountLabel) != 0 { + 
buf.WriteString(`"mountlabel":`) + fflib.WriteJsonString(buf, string(j.MountLabel)) + buf.WriteByte(',') + } + if true { + buf.WriteString(`"created":`) + + { + + obj, err = j.Created.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + if len(j.CompressedDigest) != 0 { + buf.WriteString(`"compressed-diff-digest":`) + fflib.WriteJsonString(buf, string(j.CompressedDigest)) + buf.WriteByte(',') + } + if j.CompressedSize != 0 { + buf.WriteString(`"compressed-size":`) + fflib.FormatBits2(buf, uint64(j.CompressedSize), 10, j.CompressedSize < 0) + buf.WriteByte(',') + } + if len(j.UncompressedDigest) != 0 { + buf.WriteString(`"diff-digest":`) + fflib.WriteJsonString(buf, string(j.UncompressedDigest)) + buf.WriteByte(',') + } + if j.UncompressedSize != 0 { + buf.WriteString(`"diff-size":`) + fflib.FormatBits2(buf, uint64(j.UncompressedSize), 10, j.UncompressedSize < 0) + buf.WriteByte(',') + } + if j.CompressionType != 0 { + buf.WriteString(`"compression":`) + fflib.FormatBits2(buf, uint64(j.CompressionType), 10, j.CompressionType < 0) + buf.WriteByte(',') + } + if len(j.Flags) != 0 { + buf.WriteString(`"flags":`) + /* Falling back. type=map[string]interface {} kind=map */ + err = buf.Encode(j.Flags) + if err != nil { + return err + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffjtLayerbase = iota + ffjtLayernosuchkey + + ffjtLayerID + + ffjtLayerNames + + ffjtLayerParent + + ffjtLayerMetadata + + ffjtLayerMountLabel + + ffjtLayerCreated + + ffjtLayerCompressedDigest + + ffjtLayerCompressedSize + + ffjtLayerUncompressedDigest + + ffjtLayerUncompressedSize + + ffjtLayerCompressionType + + ffjtLayerFlags +) + +var ffjKeyLayerID = []byte("id") + +var ffjKeyLayerNames = []byte("names") + +var ffjKeyLayerParent = []byte("parent") + +var ffjKeyLayerMetadata = []byte("metadata") + +var ffjKeyLayerMountLabel = []byte("mountlabel") + +var ffjKeyLayerCreated = []byte("created") + +var ffjKeyLayerCompressedDigest = []byte("compressed-diff-digest") + +var ffjKeyLayerCompressedSize = []byte("compressed-size") + +var ffjKeyLayerUncompressedDigest = []byte("diff-digest") + +var ffjKeyLayerUncompressedSize = []byte("diff-size") + +var ffjKeyLayerCompressionType = []byte("compression") + +var ffjKeyLayerFlags = []byte("flags") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *Layer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *Layer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtLayerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffjtLayernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffjKeyLayerCreated, kn) { + currentKey = ffjtLayerCreated + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerCompressedDigest, kn) { + currentKey = ffjtLayerCompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerCompressedSize, kn) { + currentKey = ffjtLayerCompressedSize + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerCompressionType, kn) { + currentKey = ffjtLayerCompressionType + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffjKeyLayerUncompressedDigest, kn) { + currentKey = ffjtLayerUncompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerUncompressedSize, kn) { + currentKey = ffjtLayerUncompressedSize + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffjKeyLayerFlags, kn) { + currentKey = ffjtLayerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeyLayerID, kn) { + currentKey = ffjtLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffjKeyLayerMetadata, kn) { + currentKey = ffjtLayerMetadata + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerMountLabel, kn) { + currentKey = ffjtLayerMountLabel + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffjKeyLayerNames, kn) { + currentKey = ffjtLayerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffjKeyLayerParent, kn) { + currentKey = ffjtLayerParent + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyLayerFlags, kn) { + currentKey = ffjtLayerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerCompressionType, kn) { + currentKey = ffjtLayerCompressionType + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerUncompressedSize, kn) { + currentKey = ffjtLayerUncompressedSize + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerUncompressedDigest, kn) { + currentKey = ffjtLayerUncompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerCompressedSize, kn) { + currentKey = ffjtLayerCompressedSize + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerCompressedDigest, kn) { + currentKey = ffjtLayerCompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerCreated, kn) { + currentKey = ffjtLayerCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerMountLabel, kn) { + currentKey = ffjtLayerMountLabel + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerMetadata, kn) { + currentKey = ffjtLayerMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.SimpleLetterEqualFold(ffjKeyLayerParent, kn) { + currentKey = ffjtLayerParent + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerNames, kn) { + currentKey = ffjtLayerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerID, kn) { + currentKey = ffjtLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtLayernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtLayerID: + goto handle_ID + + case ffjtLayerNames: + goto handle_Names + + case ffjtLayerParent: + goto handle_Parent + + case ffjtLayerMetadata: + goto handle_Metadata + + case ffjtLayerMountLabel: + goto handle_MountLabel + + case ffjtLayerCreated: + goto handle_Created + + case ffjtLayerCompressedDigest: + goto handle_CompressedDigest + + case ffjtLayerCompressedSize: + goto handle_CompressedSize + + case ffjtLayerUncompressedDigest: + goto handle_UncompressedDigest + + case ffjtLayerUncompressedSize: + goto handle_UncompressedSize + + case ffjtLayerCompressionType: + goto handle_CompressionType + + case ffjtLayerFlags: + goto handle_Flags + + case ffjtLayernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Names: + + /* handler: j.Names type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Names = nil + } else { + + j.Names = []string{} + + wantVal := true + + for { + + var tmpJNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJNames = string(string(outBuf)) + + } + } + + j.Names = append(j.Names, tmpJNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Parent: + + /* handler: j.Parent type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Parent = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Metadata: + + /* handler: j.Metadata type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Metadata = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MountLabel: + + /* handler: j.MountLabel type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.MountLabel = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Created: + + /* handler: j.Created type=time.Time kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + } else { + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = j.Created.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompressedDigest: + + /* handler: j.CompressedDigest type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.CompressedDigest = digest.Digest(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompressedSize: + + /* handler: j.CompressedSize type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.CompressedSize = int64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UncompressedDigest: + + /* handler: j.UncompressedDigest type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go 
value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.UncompressedDigest = digest.Digest(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UncompressedSize: + + /* handler: j.UncompressedSize type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.UncompressedSize = int64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompressionType: + + /* handler: j.CompressionType type=archive.Compression kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.CompressionType = archive.Compression(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Flags: + + /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Flags = nil + } else { + + j.Flags = make(map[string]interface{}, 0) + + wantVal := true + + for { + + var k string + + var tmpJFlags interface{} + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ + + { + /* Falling back. 
type=interface {} kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJFlags) + if err != nil { + return fs.WrapErr(err) + } + } + + j.Flags[k] = tmpJFlags + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *layerMountPoint) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *layerMountPoint) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteString(`,"path":`) + fflib.WriteJsonString(buf, string(j.MountPoint)) + buf.WriteString(`,"count":`) + fflib.FormatBits2(buf, uint64(j.MountCount), 10, j.MountCount < 0) + buf.WriteByte('}') + return nil +} + +const ( + ffjtlayerMountPointbase = iota + ffjtlayerMountPointnosuchkey + + ffjtlayerMountPointID + + ffjtlayerMountPointMountPoint + + ffjtlayerMountPointMountCount +) + +var ffjKeylayerMountPointID = []byte("id") + +var ffjKeylayerMountPointMountPoint = []byte("path") + +var ffjKeylayerMountPointMountCount = []byte("count") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *layerMountPoint) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *layerMountPoint) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtlayerMountPointbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtlayerMountPointnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffjKeylayerMountPointMountCount, kn) { + currentKey = ffjtlayerMountPointMountCount + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeylayerMountPointID, kn) { + currentKey = ffjtlayerMountPointID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffjKeylayerMountPointMountPoint, kn) { + currentKey = ffjtlayerMountPointMountPoint + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountCount, kn) { + currentKey = ffjtlayerMountPointMountCount + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountPoint, kn) { + currentKey = ffjtlayerMountPointMountPoint + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointID, kn) { + currentKey = ffjtlayerMountPointID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtlayerMountPointnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtlayerMountPointID: + goto handle_ID + + case ffjtlayerMountPointMountPoint: + goto handle_MountPoint + + case ffjtlayerMountPointMountCount: + goto handle_MountCount + + case ffjtlayerMountPointnosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MountPoint: + + /* handler: j.MountPoint type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.MountPoint = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MountCount: + + /* handler: j.MountCount type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.MountCount = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return 
fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *layerStore) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *layerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtlayerStorebase = iota + ffjtlayerStorenosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *layerStore) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *layerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtlayerStorebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtlayerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtlayerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtlayerStorenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *simpleGetCloser) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *simpleGetCloser) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtsimpleGetCloserbase = iota + ffjtsimpleGetClosernosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *simpleGetCloser) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *simpleGetCloser) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtsimpleGetCloserbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtsimpleGetClosernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtsimpleGetClosernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtsimpleGetClosernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go index 6e09b526..41ee9017 100644 --- a/vendor/github.com/containers/storage/lockfile.go +++ b/vendor/github.com/containers/storage/lockfile.go @@ -44,8 +44,6 @@ type lockfile struct { var ( lockfiles map[string]*lockfile lockfilesLock sync.Mutex - // ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write - ErrLockReadOnly = errors.New("lock is not a read-write lock") ) // GetLockfile opens a read-write lock file, creating it if necessary. The diff --git a/vendor/github.com/containers/storage/pkg/archive/README.md b/vendor/github.com/containers/storage/pkg/archive/README.md new file mode 100644 index 00000000..7307d969 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index a4071d71..abee36f7 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -6,7 +6,6 @@ import ( "bytes" "compress/bzip2" "compress/gzip" - "errors" "fmt" "io" "io/ioutil" @@ -27,18 +26,11 @@ import ( ) type ( - // Archive is a type of io.ReadCloser which has two interfaces Read and Closer. - Archive io.ReadCloser - // Reader is a type of io.Reader. - Reader io.Reader // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int - // TarChownOptions wraps the chown options UID and GID. - TarChownOptions struct { - UID, GID int - } + // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string @@ -47,7 +39,7 @@ type ( NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap - ChownOpts *TarChownOptions + ChownOpts *idtools.IDPair IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. 
// This format will be converted to the standard format on pack @@ -59,34 +51,28 @@ type ( // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string + InUserNS bool } - - // Archiver allows the reuse of most utility functions of this package - // with a pluggable Untar function. Also, to facilitate the passing of - // specific id mappings for untar, an archiver can be created with maps - // which will then be passed to Untar operations - Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - } - - // breakoutError is used to differentiate errors related to breaking out - // When testing archive breakout in the unit tests, this error is expected - // in order for the test to pass. - breakoutError error ) -var ( - // ErrNotImplemented is the error message of function not implemented. - ErrNotImplemented = errors.New("Function not implemented") - defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} -) +// Archiver allows the reuse of most utility functions of this package +// with a pluggable Untar function. Also, to facilitate the passing of +// specific id mappings for untar, an archiver can be created with maps +// which will then be passed to Untar operations +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMappings *idtools.IDMappings +} -const ( - // HeaderSize is the size in bytes of a tar header - HeaderSize = 512 -) +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error const ( // Uncompressed represents the uncompressed. @@ -107,17 +93,15 @@ const ( OverlayWhiteoutFormat ) -// IsArchive checks for the magic bytes of a tar or any supported compression -// algorithm. -func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. @@ -147,7 +131,7 @@ func DetectCompression(source []byte) Compression { logrus.Debug("Len too short") continue } - if bytes.Compare(m, source[:len(m)]) == 0 { + if bytes.Equal(m, source[:len(m)]) { return compression } } @@ -206,7 +190,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { } } -// CompressStream compresseses the dest with specified compression algorithm. +// CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) @@ -220,13 +204,100 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as we only currently generates gzipped tars + // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. +func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { @@ -242,8 +313,65 @@ func (compression *Compression) Extension() string { return "" } +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. 
+// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. +func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) error + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } @@ -252,9 +380,9 @@ type tarAppender struct { Buffer *bufio.Writer // for hardlink mapping - SeenFiles map[uint64]string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined @@ -263,6 +391,16 @@ type tarAppender struct { WhiteoutConverter tarWhiteoutConverter } +func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + ChownOpts: chownOpts, + } +} + // canonicalTarName provides a platform-independent and consistent posix-style //path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) (string, error) { @@ -285,33 +423,30 @@ func (ta *tarAppender) addTarFile(path, name string) error { return err } - link := "" + var link string if fi.Mode()&os.ModeSymlink != 0 { - if link, err = os.Readlink(path); err != nil { + var err error + link, err = os.Readlink(path) + if err != nil { return err } } - hdr, err := tar.FileInfoHeader(fi, link) + hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - - inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) - if err != nil { + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, - // it's hardlinked, so set the type flag accordingly + // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { @@ -323,36 +458,46 @@ func (ta *tarAppender) addTarFile(path, name string) error { } } - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - //handle re-mapping container ID mappings back to host ID mappings before //writing tar headers/files. We skip whiteout files because they were written //by the kernel and already have proper ownership relative to the host - if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { - uid, gid, err := getFileUIDGID(fi.Sys()) + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } - xUID, err := idtools.ToContainer(uid, ta.UIDMaps) + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) if err != nil { return err } - xGID, err := idtools.ToContainer(gid, ta.GIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - hdr.Gid = xGID + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { - if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { return err } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { @@ -360,7 +505,10 @@ func (ta *tarAppender) addTarFile(path, name string) error { } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - file, err := os.Open(path) + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. 
+ // On Linux, this equates to a regular os.Open. + file, err := system.OpenSequential(path) if err != nil { return err } @@ -381,7 +529,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) @@ -398,8 +546,10 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeReg, tar.TypeRegA: - // Source is regular file - file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } @@ -409,7 +559,16 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } file.Close() - case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err @@ -444,13 +603,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L return nil default: - return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { - chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err @@ -525,8 +684,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) - patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) - + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } @@ -539,14 +697,12 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(compressWriter), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: options.UIDMaps, - GIDMaps: options.GIDMaps, - WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), - } + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) defer func() { // Make sure to check the error on Close. 
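> Editor's note: `addTarFile` above records each inode it has archived so that second and later links to the same file become `tar.TypeLink` entries instead of duplicated content. A Linux-only sketch of the stat plumbing behind that idea (helper and variable names are mine, not the vendored code):

```go
// +build linux

package main

import (
	"archive/tar"
	"fmt"
	"os"
	"syscall"
)

// seen maps inode -> first archived name, mirroring tarAppender.SeenFiles.
var seen = map[uint64]string{}

// headerFor builds a header for path, turning repeat inodes into hardlinks.
func headerFor(path string, fi os.FileInfo) (*tar.Header, error) {
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		return nil, err
	}
	hdr.Name = path
	s, ok := fi.Sys().(*syscall.Stat_t)
	if ok && !fi.IsDir() && s.Nlink > 1 { // more than one link: hardlinked
		if first, seenBefore := seen[s.Ino]; seenBefore {
			// The name archived first must precede its links in the tar.
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = first
			hdr.Size = 0
		} else {
			seen[s.Ino] = path
		}
	}
	return hdr, nil
}

func main() {
	fi, err := os.Lstat("/etc/hostname")
	if err != nil {
		panic(err)
	}
	hdr, _ := headerFor("etc/hostname", fi)
	fmt.Println(hdr.Name, hdr.Typeflag)
}
```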
@@ -623,7 +779,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { - skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + skip, err = pm.Matches(relFilePath) if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err @@ -633,7 +789,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an - // excludes pattern (eg !dir/file) that starts with this + // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. @@ -642,18 +798,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } // No exceptions (!...) in patterns so just skip dir - if !exceptions { + if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) - for _, pat := range patterns { - if pat[0] != '!' { + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { continue } - pat = pat[1:] + string(filepath.Separator) - if strings.HasPrefix(pat, dirSlash) { + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } @@ -703,10 +858,8 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) // Iterate through the files in the archive. @@ -740,7 +893,7 @@ loop: parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID) + err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) if err != nil { return err } @@ -785,26 +938,8 @@ loop: } trBuf.Reset(tr) - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. 
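> Editor's note: `TarWithOptions` now delegates exclude handling to `fileutils.NewPatternMatcher` (above). The semantics it relies on (last matching pattern wins, and a leading `!` re-includes a path) can be approximated with the standard library. A rough sketch of those semantics only, not the vendored matcher:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// excluded reports whether path is excluded by patterns, where a leading
// "!" marks an exception; the last pattern that matches wins.
func excluded(path string, patterns []string) bool {
	skip := false
	for _, p := range patterns {
		exception := strings.HasPrefix(p, "!")
		p = strings.TrimPrefix(p, "!")
		if ok, err := filepath.Match(p, path); err == nil && ok {
			skip = !exception
		}
	}
	return skip
}

func main() {
	pats := []string{"dir/*", "!dir/keep.txt"}
	fmt.Println(excluded("dir/drop.txt", pats)) // true
	fmt.Println(excluded("dir/keep.txt", pats)) // false
}
```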
- if hdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if hdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID + if err := remapIDs(idMappings, hdr); err != nil { + return err } if whiteoutConverter != nil { @@ -817,7 +952,7 @@ loop: } } - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } @@ -889,23 +1024,13 @@ func (archiver *Archiver) TarUntar(src, dst string) error { return err } defer archive.Close() - - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } + options := &TarOptions{ + UIDMaps: archiver.IDMappings.UIDs(), + GIDMaps: archiver.IDMappings.GIDs(), } return archiver.Untar(archive, dst, options) } -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return defaultArchiver.TarUntar(src, dst) -} - // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) @@ -913,22 +1038,13 @@ func (archiver *Archiver) UntarPath(src, dst string) error { return err } defer archive.Close() - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } + options := &TarOptions{ + UIDMaps: archiver.IDMappings.UIDs(), + GIDMaps: archiver.IDMappings.GIDs(), } return archiver.Untar(archive, dst, options) } -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - return defaultArchiver.UntarPath(src, dst) -} - // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no @@ -945,27 +1061,16 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // if this archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner - rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { - return err - } + rootIDs := archiver.IDMappings.RootPair() // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) return archiver.TarUntar(src, dst) } -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return defaultArchiver.CopyWithTar(src, dst) -} - // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. 
It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. @@ -986,7 +1091,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { return err } @@ -1007,28 +1112,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { + if err := remapIDs(archiver.IDMappings, hdr); err != nil { return err } - // only perform mapping if the file being copied isn't already owned by the - // uid or gid of the remapped root in the container - if remappedRootUID != hdr.Uid { - xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if remappedRootGID != hdr.Gid { - xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID - } - tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { @@ -1040,7 +1127,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { return nil }) defer func() { - if er := <-errC; err != nil { + if er := <-errC; err == nil && er != nil { err = er } }() @@ -1052,16 +1139,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { return err } -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// Destination handling is in an operating specific manner depending -// where the daemon is running. If `dst` ends with a trailing slash -// the final destination path will be `dst/base(src)` (Linux) or -// `dst\base(src)` (Windows). -func CopyFileWithTar(src, dst string) (err error) { - return defaultArchiver.CopyFileWithTar(src, dst) +func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { + ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err } // cmdStream executes a command, and returns its stdout as a stream. @@ -1096,7 +1177,7 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. -func NewTempArchive(src Archive, dir string) (*TempArchive, error) { +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := ioutil.TempFile(dir, "") if err != nil { return nil, err @@ -1145,3 +1226,26 @@ func (archive *TempArchive) Read(data []byte) (int, error) { } return n, err } + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. 
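> Editor's note: `remapIDs` above collapses the old per-UID and per-GID branches into one `IDMappings.ToHost` call. The arithmetic underneath is a linear range lookup; a standalone sketch of that mapping, with a struct shaped like `idtools.IDMap` but independent of it:

```go
package main

import "fmt"

// IDMap mirrors the idtools.IDMap triple: a range of Size ids starting at
// ContainerID maps onto the host range starting at HostID.
type IDMap struct {
	ContainerID, HostID, Size int
}

// toHost translates a container id to a host id, or fails if no range covers it.
func toHost(id int, maps []IDMap) (int, error) {
	if len(maps) == 0 {
		return id, nil // no mapping configured: identity
	}
	for _, m := range maps {
		if id >= m.ContainerID && id < m.ContainerID+m.Size {
			return m.HostID + (id - m.ContainerID), nil
		}
	}
	return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", id)
}

func main() {
	maps := []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	uid, _ := toHost(33, maps)
	fmt.Println(uid) // 100033
}
```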
+func UntarPath(src, dst string) error { + return NewDefaultArchiver().UntarPath(src, dst) +} + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index e944ca2a..5a14eb91 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -5,9 +5,9 @@ import ( "os" "path/filepath" "strings" - "syscall" "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" ) func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { @@ -19,7 +19,7 @@ func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { type overlayWhiteoutConverter struct{} -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error { +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { // convert whiteouts to AUFS format if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { // we just rename the file and make it normal @@ -34,12 +34,16 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os // convert opaque dirs to AUFS format by writing an empty file with the prefix opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") if err != nil { - return err + return nil, err } - if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' { + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + // create a header for the whiteout file // it should inherit some properties from the parent, but be a regular file - *hdr = tar.Header{ + wo = &tar.Header{ Typeflag: tar.TypeReg, Mode: hdr.Mode & int64(os.ModePerm), Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), @@ -54,7 +58,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os } } - return nil + return } func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { @@ -63,12 +67,9 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay if base == WhiteoutOpaqueDir { - if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil { - return false, err - } - + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) // don't write the file itself - return false, nil + return false, err } // if a file was deleted and we are using overlay, we need to create a character device @@ -76,7 +77,7 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, originalBase := base[len(WhiteoutPrefix):] originalPath := filepath.Join(dir, originalBase) - if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil { + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { return false, err } if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go index 19d731fd..bdc1a3d7 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -9,7 +9,10 @@ import ( "path/filepath" 
"syscall" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" ) // fixVolumePathPrefix does platform specific processing to ensure that if @@ -40,33 +43,38 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly } -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { s, ok := stat.(*syscall.Stat_t) - if !ok { - err = errors.New("cannot convert stat value to syscall.Stat_t") - return - } - - inode = uint64(s.Ino) - - // Currently go does not fill in the major/minors - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) - hdr.Devminor = int64(minor(uint64(s.Rdev))) + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert + } } return } -func getFileUIDGID(stat interface{}) (int, int, error) { +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { s, ok := stat.(*syscall.Stat_t) if !ok { - return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") } - return int(s.Uid), int(s.Gid), nil + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil } func major(device uint64) uint64 { @@ -80,20 +88,22 @@ func minor(device uint64) uint64 { // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + mode := uint32(hdr.Mode & 07777) switch hdr.Typeflag { case tar.TypeBlock: - mode |= syscall.S_IFBLK + mode |= unix.S_IFBLK case tar.TypeChar: - mode |= syscall.S_IFCHR + mode |= unix.S_IFCHR case tar.TypeFifo: - mode |= syscall.S_IFIFO + mode |= unix.S_IFIFO } - if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { - return err - } - return nil + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) } func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go index 828d3b9d..0bcbb925 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -9,6 +9,7 @@ import ( "path/filepath" "strings" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/longpath" ) @@ -42,15 +43,23 @@ func CanonicalTarNameForPath(p string) (string, error) { // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. 
func chmodTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm // Add the x bit: make everything +x from windows - perm |= 0111 + permPart |= 0111 + permPart &= 0755 - return perm + return noPermPart | permPart } -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows return } @@ -64,7 +73,7 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { return nil } -func getFileUIDGID(stat interface{}) (int, int, error) { +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { // no notion of file ownership mapping yet on Windows - return 0, 0, nil + return idtools.IDPair{0, 0}, nil } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index 488e1298..6ba4b8ec 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -267,7 +267,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { } for name, newChild := range info.children { - oldChild, _ := oldChildren[name] + oldChild := oldChildren[name] if oldChild != nil { // change? oldStat := oldChild.stat @@ -279,7 +279,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { // breaks down is if some code intentionally hides a change by setting // back mtime if statDifferent(oldStat, newStat) || - bytes.Compare(oldChild.capability, newChild.capability) != 0 { + !bytes.Equal(oldChild.capability, newChild.capability) { change := Change{ Path: newChild.path(), Kind: ChangeModify, @@ -391,16 +391,11 @@ func ChangesSize(newDir string, changes []Change) int64 { } // ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) { +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { reader, writer := io.Pipe() go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: uidMaps, - GIDMaps: gidMaps, - } + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go index 798d7bfc..90c9a627 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -10,6 +10,7 @@ import ( "unsafe" "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" ) // walker is used to implement collectFileInfoForChanges on linux. 
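> Editor's note: the Windows `chmodTarEntry` fix above is subtle: `os.FileMode` keeps file-type flags (directory, symlink, device) in the high bits, so masking the whole mode with `0755` silently erased them. A small demonstration of the corrected split:

```go
package main

import (
	"fmt"
	"os"
)

// chmodTarEntry normalizes permissions without clobbering type bits,
// following the fixed Windows implementation above.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	permPart := perm & os.ModePerm    // rwx bits only
	noPermPart := perm &^ os.ModePerm // directory/symlink/etc. flags
	permPart |= 0111                  // everything +x coming from Windows
	permPart &= 0755
	return noPermPart | permPart
}

func main() {
	in := os.ModeDir | 0600
	out := chmodTarEntry(in)
	fmt.Printf("%v -> %v (still a dir: %v)\n", in, out, out.IsDir())
}
```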
Where this @@ -65,7 +66,7 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { } parent := root.LookUp(filepath.Dir(path)) if parent == nil { - return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) } info := &FileInfo{ name: filepath.Base(path), @@ -233,7 +234,7 @@ func readdirnames(dirname string) (names []nameIno, err error) { // Refill the buffer if necessary if bufp >= nbuf { bufp = 0 - nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux if nbuf < 0 { nbuf = 0 } @@ -255,12 +256,12 @@ func readdirnames(dirname string) (names []nameIno, err error) { return sl, nil } -// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// parseDirent is a minor modification of unix.ParseDirent (linux version) // which returns {name,inode} pairs instead of just names. func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { origlen := len(buf) for len(buf) > 0 { - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) buf = buf[dirent.Reclen:] if dirent.Ino == 0 { // File absent in directory. continue @@ -293,7 +294,7 @@ func OverlayChanges(layers []string, rw string) ([]Change, error) { func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { if fi.Mode()&os.ModeCharDevice != 0 { s := fi.Sys().(*syscall.Stat_t) - if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { + if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { return path, nil } } @@ -302,7 +303,7 @@ func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { if err != nil { return "", err } - if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' { + if len(opaque) == 1 && opaque[0] == 'y' { return path, nil } } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go index 43dd94e2..d669c01b 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go @@ -7,6 +7,7 @@ import ( "syscall" "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" ) func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { @@ -16,7 +17,7 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { oldStat.GID() != newStat.GID() || oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, its not a good measure of change - (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { return true } @@ -24,11 +25,11 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { } func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 } func getIno(fi os.FileInfo) uint64 { - return uint64(fi.Sys().(*syscall.Stat_t).Ino) + return fi.Sys().(*syscall.Stat_t).Ino } func hasHardlinks(fi os.FileInfo) bool { diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go index 06eadd66..5ad3d7e3 100644 --- 
a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go @@ -9,16 +9,16 @@ import ( func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { // Don't look at size for dirs, its not a good measure of change - if oldStat.ModTime() != newStat.ModTime() || + if oldStat.Mtim() != newStat.Mtim() || oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.IsDir() { + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { return true } return false } func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.IsDir() + return info.parent == nil || info.stat.Mode().IsDir() } func getIno(fi os.FileInfo) (inode uint64) { diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go index c970f422..ea012b2d 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -88,13 +88,13 @@ func SplitPathDirEntry(path string) (dir, base string) { // This function acts as a convenient wrapper around TarWithOptions, which // requires a directory as the source path. TarResource accepts either a // directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content Archive, err error) { +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) } // TarResourceRebase is like TarResource but renames the first path element of // items in the resulting tar archive to match the given rebaseName if not "". -func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) { +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { sourcePath = normalizePath(sourcePath) if _, err = os.Lstat(sourcePath); err != nil { // Catches the case where the source does not exist or is not a @@ -103,7 +103,7 @@ func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err erro return } - // Separate the source path between it's directory and + // Separate the source path between its directory and // the entry in that directory which we are archiving. sourceDir, sourceBase := SplitPathDirEntry(sourcePath) @@ -241,7 +241,7 @@ func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { // contain the archived resource described by srcInfo, to the destination // described by dstInfo. Returns the possibly modified content archive along // with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { // Ensure in platform semantics srcInfo.Path = normalizePath(srcInfo.Path) dstInfo.Path = normalizePath(dstInfo.Path) @@ -304,7 +304,7 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st // RebaseArchiveEntries rewrites the given srcContent archive replacing // an occurrence of oldBase with newBase at the beginning of entry names. 
-func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { if oldBase == string(os.PathSeparator) { // If oldBase specifies the root directory, use an empty string as // oldBase instead so that newBase doesn't replace the path separator @@ -332,6 +332,9 @@ func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { } hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } if err = rebasedTar.WriteHeader(hdr); err != nil { w.CloseWithError(err) @@ -380,7 +383,7 @@ func CopyResource(srcPath, dstPath string, followLink bool) error { // CopyTo handles extracting the given content whose // entries should be sourced from srcInfo to dstPath. -func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error { +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { // The destination path need not exist, but CopyInfoDestinationPath will // ensure that at least the parent directory exists. dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index c7ad4d94..f93f4cb1 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -19,7 +19,7 @@ import ( // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be // compressed or uncompressed. // Returns the size in bytes of the contents of the layer. -func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { tr := tar.NewReader(layer) trBuf := pools.BufioReader32KPool.Get(tr) defer pools.BufioReader32KPool.Put(trBuf) @@ -33,17 +33,11 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return 0, err - } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) - if options == nil { - options = &TarOptions{} - } // Iterate through the files in the archive. 
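> Editor's note: one behavioral fix worth calling out from the copy.go hunk above is that `RebaseArchiveEntries` now rewrites `Linkname` for hardlink entries too, since a hardlink pointing at the old base would dangle after extraction. A stdlib-only sketch of that rename pass:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"strings"
)

// rebase streams src into dst, replacing the first occurrence of oldBase
// with newBase in every entry name, and in Linkname for hardlink entries.
func rebase(dst *tar.Writer, src *tar.Reader, oldBase, newBase string) error {
	for {
		hdr, err := src.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
		if hdr.Typeflag == tar.TypeLink {
			hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
		}
		if err := dst.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(dst, src); err != nil {
			return err
		}
	}
}

func main() {
	var in, out bytes.Buffer
	tw := tar.NewWriter(&in)
	tw.WriteHeader(&tar.Header{Name: "old/f.txt", Mode: 0644})
	tw.Close()

	dst := tar.NewWriter(&out)
	if err := rebase(dst, tar.NewReader(&in), "old", "new"); err != nil {
		panic(err)
	}
	dst.Close()

	hdr, _ := tar.NewReader(&out).Next()
	fmt.Println(hdr.Name) // new/f.txt
}
```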
for { hdr, err := tr.Next() @@ -90,7 +84,7 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) + err = system.MkdirAll(parentPath, 0600, "") if err != nil { return 0, err } @@ -111,7 +105,7 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er } defer os.RemoveAll(aufsTempdir) } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { return 0, err } } @@ -198,28 +192,11 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er srcData = tmpFile } - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. - if srcHdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) - if err != nil { - return 0, err - } - srcHdr.Uid = xUID + if err := remapIDs(idMappings, srcHdr); err != nil { + return 0, err } - if srcHdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) - if err != nil { - return 0, err - } - srcHdr.Gid = xGID - } - if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { return 0, err } @@ -246,7 +223,7 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er // and applies it to the directory `dest`. The stream `layer` can be // compressed or uncompressed. // Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer Reader) (int64, error) { +func ApplyLayer(dest string, layer io.Reader) (int64, error) { return applyLayerHandler(dest, layer, &TarOptions{}, true) } @@ -254,12 +231,12 @@ func ApplyLayer(dest string, layer Reader) (int64, error) { // `layer`, and applies it to the directory `dest`. The stream `layer` // can only be uncompressed. // Returns the size in bytes of the contents of the layer. 
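> Editor's note: `applyLayerHandler` decides whether to call `DecompressStream`, which (like `IsArchive` earlier in this patch) keys off the first bytes of the stream. A sketch of that magic-number check with the usual signatures for gzip, bzip2, and xz:

```go
package main

import (
	"bytes"
	"fmt"
)

type compression int

const (
	uncompressed compression = iota
	gzipped
	bzip2ed
	xzed
)

// detect matches the leading bytes of header against well-known magic
// numbers; anything unrecognized is treated as uncompressed.
func detect(header []byte) compression {
	magics := map[compression][]byte{
		gzipped: {0x1f, 0x8b, 0x08},
		bzip2ed: {0x42, 0x5a, 0x68},                  // "BZh"
		xzed:    {0xfd, 0x37, 0x7a, 0x58, 0x5a, 0x00}, // 0xFD "7zXZ" 0x00
	}
	for c, m := range magics {
		if bytes.HasPrefix(header, m) {
			return c
		}
	}
	return uncompressed
}

func main() {
	fmt.Println(detect([]byte{0x1f, 0x8b, 0x08, 0x00}) == gzipped) // true
}
```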
-func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) { +func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { return applyLayerHandler(dest, layer, options, false) } // do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { dest = filepath.Clean(dest) // We need to be able to set any perms diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go index dfb335c0..b39d12c8 100644 --- a/vendor/github.com/containers/storage/pkg/archive/wrap.go +++ b/vendor/github.com/containers/storage/pkg/archive/wrap.go @@ -3,7 +3,7 @@ package archive import ( "archive/tar" "bytes" - "io/ioutil" + "io" ) // Generate generates a new archive from the content provided @@ -22,7 +22,7 @@ import ( // // FIXME: stream content instead of buffering // FIXME: specify permissions and other archive metadata -func Generate(input ...string) (Archive, error) { +func Generate(input ...string) (io.Reader, error) { files := parseStringPairs(input...) buf := new(bytes.Buffer) tw := tar.NewWriter(buf) @@ -42,7 +42,7 @@ func Generate(input ...string) (Archive, error) { if err := tw.Close(); err != nil { return nil, err } - return ioutil.NopCloser(buf), nil + return buf, nil } func parseStringPairs(input ...string) (output [][2]string) { diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index 649575c0..2735f140 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -11,7 +11,13 @@ import ( "github.com/containers/storage/pkg/idtools" ) -var chrootArchiver = &archive.Archiver{Untar: Untar} +// NewArchiver returns a new Archiver which uses chrootarchive.Untar +func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &archive.Archiver{Untar: Untar, IDMappings: idMappings} +} // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. 
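> Editor's note: wrap.go's `Generate` above turns alternating name/content arguments into an in-memory tar and now returns a plain `io.Reader`. The same pattern in isolation, mirroring the behavior described (my helper, not the vendored function):

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

// generate builds an in-memory tar from alternating name, content pairs,
// mirroring the Generate("file", "body", ...) convention above.
func generate(input ...string) (io.Reader, error) {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for i := 0; i+1 < len(input); i += 2 {
		name, content := input[i], input[i+1]
		hdr := &tar.Header{Name: name, Mode: 0644, Size: int64(len(content))}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	r, err := generate("hello.txt", "hello world")
	if err != nil {
		panic(err)
	}
	hdr, _ := tar.NewReader(r).Next()
	fmt.Println(hdr.Name, hdr.Size)
}
```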
@@ -30,7 +36,6 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOp // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { - if tarArchive == nil { return fmt.Errorf("Empty archive") } @@ -41,14 +46,12 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions options.ExcludePatterns = []string{} } - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { return err } } @@ -65,33 +68,3 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions return invokeUnpack(r, dest, options) } - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return chrootArchiver.TarUntar(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return chrootArchiver.CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// If `dst` ends with a trailing slash '/' ('\' on Windows), the final -// destination path will be `dst/base(src)` or `dst\base(src)` -func CopyFileWithTar(src, dst string) (err error) { - return chrootArchiver.CopyFileWithTar(src, dst) -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - return chrootArchiver.UntarPath(src, dst) -} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go index 54b5ff48..e8bd22e3 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -5,9 +5,10 @@ import ( "io/ioutil" "os" "path/filepath" - "syscall" "github.com/containers/storage/pkg/mount" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" ) // chroot on linux uses pivot_root instead of chroot @@ -17,14 +18,25 @@ import ( // Old root is removed after the call to pivot_root so it is no longer available under the new root. 
// This is similar to how libcontainer sets up a container's rootfs func chroot(path string) (err error) { - if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { + // if the engine is running in a user namespace we need to use actual chroot + if rsystem.RunningInUserNS() { + return realChroot(path) + } + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { return fmt.Errorf("Error creating mount namespace before pivot: %v", err) } - if err := mount.MakeRPrivate(path); err != nil { + // make everything in new ns private + if err := mount.MakeRPrivate("/"); err != nil { return err } + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + // setup oldRoot for pivot_root pivotDir, err := ioutil.TempDir(path, ".pivot_root") if err != nil { @@ -35,7 +47,7 @@ func chroot(path string) (err error) { defer func() { if mounted { // make sure pivotDir is not mounted before we try to remove it - if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { + if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { if err == nil { err = errCleanup } @@ -52,16 +64,9 @@ func chroot(path string) (err error) { err = errCleanup } } - - if errCleanup := syscall.Unmount("/", syscall.MNT_DETACH); errCleanup != nil { - if err == nil { - err = fmt.Errorf("error unmounting root: %v", errCleanup) - } - return - } }() - if err := syscall.PivotRoot(path, pivotDir); err != nil { + if err := unix.PivotRoot(path, pivotDir); err != nil { // If pivot fails, fall back to the normal chroot after cleaning up temp dir if err := os.Remove(pivotDir); err != nil { return fmt.Errorf("Error cleaning up after failed pivot: %v", err) @@ -74,17 +79,17 @@ func chroot(path string) (err error) { // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction pivotDir = filepath.Join("/", filepath.Base(pivotDir)) - if err := syscall.Chdir("/"); err != nil { + if err := unix.Chdir("/"); err != nil { return fmt.Errorf("Error changing to new root: %v", err) } // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host - if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { + if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { return fmt.Errorf("Error making old root private after pivot: %v", err) } // Now unmount the old root so it's no longer visible from the new root - if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { return fmt.Errorf("Error while unmounting old root after pivot: %v", err) } mounted = false @@ -93,10 +98,10 @@ func chroot(path string) (err error) { } func realChroot(path string) error { - if err := syscall.Chroot(path); err != nil { + if err := unix.Chroot(path); err != nil { return fmt.Errorf("Error after fallback to chroot: %v", err) } - if err := syscall.Chdir("/"); err != nil { + if err := unix.Chdir("/"); err != nil { return fmt.Errorf("Error changing to new root after chroot: %v", err) } return nil diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go index 16354bf6..f9b5dece 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go +++ 
b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -2,11 +2,11 @@ package chrootarchive -import "syscall" +import "golang.org/x/sys/unix" func chroot(path string) error { - if err := syscall.Chroot(path); err != nil { + if err := unix.Chroot(path); err != nil { return err } - return syscall.Chdir("/") + return unix.Chdir("/") } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go index 377aeb9f..68b8f74f 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go @@ -1,12 +1,16 @@ package chrootarchive -import "github.com/containers/storage/pkg/archive" +import ( + "io" + + "github.com/containers/storage/pkg/archive" +) // ApplyLayer parses a diff in the standard layer format from `layer`, // and applies it to the directory `dest`. The stream `layer` can only be // uncompressed. // Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) { +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) } @@ -14,6 +18,6 @@ func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) { // `layer`, and applies it to the directory `dest`. The stream `layer` // can only be uncompressed. // Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) { +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { return applyLayerHandler(dest, layer, options, false) } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go index 3a9f9a82..4369f30c 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -7,6 +7,7 @@ import ( "encoding/json" "flag" "fmt" + "io" "io/ioutil" "os" "path/filepath" @@ -15,6 +16,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" ) type applyLayerResponse struct { @@ -27,13 +29,14 @@ type applyLayerResponse struct { func applyLayer() { var ( - tmpDir = "" + tmpDir string err error options *archive.TarOptions ) runtime.LockOSThread() flag.Parse() + inUserns := rsystem.RunningInUserNS() if err := chroot(flag.Arg(0)); err != nil { fatal(err) } @@ -49,6 +52,10 @@ func applyLayer() { fatal(err) } + if inUserns { + options.InUserNS = true + } + if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil { fatal(err) } @@ -75,7 +82,7 @@ func applyLayer() { // applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. Returns the size in bytes of the // contents of the layer. 
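> Editor's note: the pivot_root-based chroot in chroot_linux.go above is easy to lose in the diff noise. A heavily trimmed sketch of the core sequence, assuming root, Linux, `golang.org/x/sys/unix`, and that the caller already unshared a mount namespace; the bind-mount fallback and error cleanup from the vendored version are omitted, so treat this as illustration only:

```go
// +build linux

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// pivotInto makes path the new root. Assumes the caller is root, has already
// called unix.Unshare(unix.CLONE_NEWNS), made "/" recursively private, and
// ensured path is a mount point (the vendored code bind-mounts it to itself).
func pivotInto(path string) error {
	// pivot_root needs somewhere under the new root to park the old root.
	pivotDir, err := ioutil.TempDir(path, ".pivot_root")
	if err != nil {
		return err
	}
	if err := unix.PivotRoot(path, pivotDir); err != nil {
		return fmt.Errorf("pivot_root: %v", err)
	}
	if err := unix.Chdir("/"); err != nil {
		return err
	}
	// The old root now lives at /<basename>; make it private so unmounting
	// does not propagate to the host, then detach and remove it.
	old := "/" + filepath.Base(pivotDir)
	if err := unix.Mount("", old, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
		return err
	}
	if err := unix.Unmount(old, unix.MNT_DETACH); err != nil {
		return err
	}
	return os.Remove(old)
}

func main() {
	if os.Geteuid() != 0 || len(os.Args) < 2 {
		fmt.Println("usage (as root): pivot <newroot>")
		return
	}
	if err := pivotInto(os.Args[1]); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```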
-func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { dest = filepath.Clean(dest) if decompress { decompressed, err := archive.DecompressStream(layer) @@ -88,6 +95,9 @@ func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOp } if options == nil { options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go index 534d2708..8f8e88bf 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go @@ -2,6 +2,7 @@ package chrootarchive import ( "fmt" + "io" "io/ioutil" "os" "path/filepath" @@ -13,7 +14,7 @@ import ( // applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. Returns the size in bytes of the // contents of the layer. -func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { dest = filepath.Clean(dest) // Ensure it is a Windows-style volume path @@ -37,7 +38,7 @@ func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOp s, err := archive.UnpackLayer(dest, layer, nil) os.RemoveAll(tmpDir) if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) } return s, nil diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go index 1ed0e861..6a0ac246 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,cgo package devicemapper @@ -7,17 +7,14 @@ import ( "fmt" "os" "runtime" - "syscall" "unsafe" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) -// DevmapperLogger defines methods for logging with devicemapper. -type DevmapperLogger interface { - DMLog(level int, file string, line int, dmError int, message string) -} - +// Same as DM_DEVICE_* enum values from libdevmapper.h +// nolint: deadcode const ( deviceCreate TaskType = iota deviceReload @@ -155,6 +152,7 @@ func (t *Task) run() error { if res := DmTaskRun(t.unmanaged); res != 1 { return ErrTaskRun } + runtime.KeepAlive(t) return nil } @@ -257,25 +255,12 @@ func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start // UdevWait waits for any processes that are waiting for udev to complete the specified cookie. func UdevWait(cookie *uint) error { if res := DmUdevWait(*cookie); res != 1 { - logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie) + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res) return ErrUdevWait } return nil } -// LogInitVerbose is an interface to initialize the verbose logger for the device mapper library. 
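> Editor's note: the devicemapper hunks just below surface `ErrBusy` and `ErrEnxio` from `RemoveDevice` and `RemoveDeviceDeferred`, so callers can branch on sentinel errors instead of string matching. A caller sketch against the exported API shown in this diff; the retry loop and timing are illustrative choices of mine, not the library's:

```go
// +build linux,cgo

package main

import (
	"log"
	"time"

	"github.com/containers/storage/pkg/devicemapper"
)

// removeWithFallback tries a synchronous remove a few times, then falls
// back to deferred removal so the kernel cleans up once the device is free.
func removeWithFallback(name string) error {
	for i := 0; i < 3; i++ {
		err := devicemapper.RemoveDevice(name)
		switch err {
		case nil, devicemapper.ErrEnxio: // removed, or already gone
			return nil
		case devicemapper.ErrBusy:
			time.Sleep(100 * time.Millisecond) // still open somewhere; retry
			continue
		default:
			return err
		}
	}
	return devicemapper.RemoveDeviceDeferred(name)
}

func main() {
	if err := removeWithFallback("thin-snap-42"); err != nil {
		log.Fatal(err)
	}
}
```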
-func LogInitVerbose(level int) { - DmLogInitVerbose(level) -} - -var dmLogger DevmapperLogger - -// LogInit initializes the logger for the device mapper library. -func LogInit(logger DevmapperLogger) { - dmLogger = logger - LogWithErrnoInit() -} - // SetDevDir sets the dev folder for the device mapper library (usually /dev). func SetDevDir(dir string) error { if res := DmSetDevDir(dir); res != 1 { @@ -328,17 +313,21 @@ func RemoveDevice(name string) error { return err } - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can not set cookie: %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) dmSawBusy = false // reset before the task is run + dmSawEnxio = false if err = task.run(); err != nil { if dmSawBusy { return ErrBusy } + if dmSawEnxio { + return ErrEnxio + } return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) } @@ -358,7 +347,32 @@ func RemoveDeviceDeferred(name string) error { return ErrTaskDeferredRemove } + // set a task cookie and disable library fallback, or else libdevmapper will + // disable udev dm rules and delete the symlink under /dev/mapper by itself, + // even if the removal is deferred by the kernel. + cookie := new(uint) + var flags uint16 + flags = DmUdevDisableLibraryFallback + if err := task.setCookie(cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + + // libdevmapper and udev relies on System V semaphore for synchronization, + // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. + // So these two function call must come in pairs, otherwise semaphores will + // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` + // will be reached, which will eventually make all following calls to 'task.SetCookie' + // fail. + // this call will not wait for the deferred removal's final executing, since no + // udev event will be generated, and the semaphore's value will not be incremented + // by udev, what UdevWait is just cleaning up the semaphore. + defer UdevWait(cookie) + + dmSawEnxio = false if err = task.run(); err != nil { + if dmSawEnxio { + return ErrEnxio + } return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) } @@ -427,7 +441,7 @@ func BlockDeviceDiscard(path string) error { // Without this sometimes the remove of the device that happens after // discard fails with EBUSY. 
- syscall.Sync() + unix.Sync() return nil } @@ -450,13 +464,13 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize return fmt.Errorf("devicemapper: Can't add target %s", err) } - var cookie uint + cookie := new(uint) var flags uint16 flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag - if err := task.setCookie(&cookie, flags); err != nil { + if err := task.setCookie(cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) @@ -484,7 +498,7 @@ func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate %s", err) + return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) } return nil @@ -638,11 +652,11 @@ func ResumeDevice(name string) error { return err } - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceResume %s", err) @@ -736,12 +750,12 @@ func activateDevice(poolName string, name string, deviceID int, size uint64, ext return fmt.Errorf("devicemapper: Can't add node %s", err) } - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) @@ -750,6 +764,33 @@ func activateDevice(poolName string, name string, deviceID int, size uint64, ext return nil } +// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. +func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
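That comment (the check follows) defines a caller-side retry contract: only `ErrDeviceIDExists` means "try another id". A hypothetical sketch with a stubbed create call:

```go
package main

import (
	"errors"
	"fmt"
)

// errDeviceIDExists stands in for devicemapper.ErrDeviceIDExists.
var errDeviceIDExists = errors.New("device id exists")

// createSnap is a stub for CreateSnapDeviceRaw; here, ids below 3 are "taken".
func createSnap(pool string, deviceID, baseDeviceID int) error {
	if deviceID < 3 {
		return errDeviceIDExists
	}
	return nil
}

// nextFreeSnapID probes device ids until one is free, treating only the
// sentinel as retryable; any other error is fatal.
func nextFreeSnapID(pool string, base int) (int, error) {
	for id := 0; id < 10; id++ {
		switch err := createSnap(pool, id, base); err {
		case nil:
			return id, nil
		case errDeviceIDExists:
			continue // id already in use, try the next one
		default:
			return -1, err
		}
	}
	return -1, fmt.Errorf("no free device id in pool %s", pool)
}

func main() {
	id, err := nextFreeSnapID("pool0", 1)
	fmt.Println("picked device id:", id, "err:", err)
}
```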
+		if dmSawExist {
+			return ErrDeviceIDExists
+		}
+		return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err)
+	}
+
+	return nil
+}
+
 // CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId,
 func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
 	devinfo, _ := GetInfo(baseName)
@@ -761,42 +802,15 @@ func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDevice
 		}
 	}
 
-	task, err := TaskCreateNamed(deviceTargetMsg, poolName)
-	if task == nil {
+	if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
 		if doSuspend {
-			ResumeDevice(baseName)
+			if err2 := ResumeDevice(baseName); err2 != nil {
+				return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2)
+			}
 		}
 		return err
 	}
 
-	if err := task.setSector(0); err != nil {
-		if doSuspend {
-			ResumeDevice(baseName)
-		}
-		return fmt.Errorf("devicemapper: Can't set sector %s", err)
-	}
-
-	if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil {
-		if doSuspend {
-			ResumeDevice(baseName)
-		}
-		return fmt.Errorf("devicemapper: Can't set message %s", err)
-	}
-
-	dmSawExist = false // reset before the task is run
-	if err := task.run(); err != nil {
-		if doSuspend {
-			ResumeDevice(baseName)
-		}
-		// Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
-		if dmSawExist {
-			return ErrDeviceIDExists
-		}
-
-		return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err)
-
-	}
-
 	if doSuspend {
 		if err := ResumeDevice(baseName); err != nil {
 			return err
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
index 76c97566..b540281f 100644
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
@@ -1,21 +1,49 @@
-// +build linux
+// +build linux,cgo

 package devicemapper

 import "C"

 import (
+	"fmt"
 	"strings"
+
+	"github.com/sirupsen/logrus"
 )

+// DevmapperLogger defines methods required to register as a callback for
+// logging events received from devicemapper. Note that devicemapper will send
+// *all* logs to callbacks regardless of level (including debug logs), so it's
+// recommended not to spam the console with the output.
+type DevmapperLogger interface {
+	// DMLog is the logging callback containing all of the information from
+	// devicemapper. The interface is identical to the C libdm counterpart.
+	DMLog(level int, file string, line int, dmError int, message string)
+}
+
+// dmLogger is the current logger in use that is being forwarded our messages.
+var dmLogger DevmapperLogger
+
+// LogInit changes the logging callback called after processing libdm logs for
+// error message information. The default logger simply forwards all logs to
+// logrus. Calling LogInit(nil) disables the calling of callbacks.
+func LogInit(logger DevmapperLogger) {
+	dmLogger = logger
+}
+
 // Due to the way cgo works this has to be in a separate file, as devmapper.go has
 // definitions in the cgo block, which is incompatible with using "//export"

-// StorageDevmapperLogCallback exports the devmapper log callback for cgo.
+// StorageDevmapperLogCallback exports the devmapper log callback for cgo.
Note that +// because we are using callbacks, this function will be called for *every* log +// in libdm (even debug ones because there's no way of setting the verbosity +// level for an external logging callback). //export StorageDevmapperLogCallback -func StorageDevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { +func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) { msg := C.GoString(message) - if level < 7 { + + // Track what errno libdm saw, because the library only gives us 0 or 1. + if level < LogLevelDebug { if strings.Contains(msg, "busy") { dmSawBusy = true } @@ -33,3 +61,61 @@ func StorageDevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoO dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) } } + +// DefaultLogger is the default logger used by pkg/devicemapper. It forwards +// all logs that are of higher or equal priority to the given level to the +// corresponding logrus level. +type DefaultLogger struct { + // Level corresponds to the highest libdm level that will be forwarded to + // logrus. In order to change this, register a new DefaultLogger. + Level int +} + +// DMLog is the logging callback containing all of the information from +// devicemapper. The interface is identical to the C libdm counterpart. +func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) { + if level <= l.Level { + // Forward the log to the correct logrus level, if allowed by dmLogLevel. + logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + switch level { + case LogLevelFatal, LogLevelErr: + logrus.Error(logMsg) + case LogLevelWarn: + logrus.Warn(logMsg) + case LogLevelNotice, LogLevelInfo: + logrus.Info(logMsg) + case LogLevelDebug: + logrus.Debug(logMsg) + default: + // Don't drop any "unknown" levels. + logrus.Info(logMsg) + } + } +} + +// registerLogCallback registers our own logging callback function for libdm +// (which is StorageDevmapperLogCallback). +// +// Because libdm only gives us {0,1} error codes we need to parse the logs +// produced by libdm (to set dmSawBusy and so on). Note that by registering a +// callback using StorageDevmapperLogCallback, libdm will no longer output logs to +// stderr so we have to log everything ourselves. None of this handling is +// optional because we depend on log callbacks to parse the logs, and if we +// don't forward the log information we'll be in a lot of trouble when +// debugging things. +func registerLogCallback() { + LogWithErrnoInit() +} + +func init() { + // Use the default logger by default. We only allow LogLevelFatal by + // default, because internally we mask a lot of libdm errors by retrying + // and similar tricks. Also, libdm is very chatty and we don't want to + // worry users for no reason. + dmLogger = DefaultLogger{ + Level: LogLevelFatal, + } + + // Register as early as possible so we don't miss anything. + registerLogCallback() +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go index e37e0205..190d83d4 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go @@ -1,9 +1,9 @@ -// +build linux +// +build linux,cgo package devicemapper /* -#cgo LDFLAGS: -L. 
-ldevmapper +#define _GNU_SOURCE #include #include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? @@ -12,19 +12,25 @@ extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_ static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) { - char buffer[256]; - va_list ap; + char *buffer = NULL; + va_list ap; + int ret; - va_start(ap, f); - vsnprintf(buffer, 256, f, ap); - va_end(ap); + va_start(ap, f); + ret = vasprintf(&buffer, f, ap); + va_end(ap); + if (ret < 0) { + // memory allocation failed -- should never happen? + return; + } - StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); + StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); + free(buffer); } static void log_with_errno_init() { - dm_log_with_errno_init(log_cb); + dm_log_with_errno_init(log_cb); } */ import "C" @@ -56,7 +62,6 @@ const ( var ( DmGetLibraryVersion = dmGetLibraryVersionFct DmGetNextTarget = dmGetNextTargetFct - DmLogInitVerbose = dmLogInitVerboseFct DmSetDevDir = dmSetDevDirFct DmTaskAddTarget = dmTaskAddTargetFct DmTaskCreate = dmTaskCreateFct @@ -226,10 +231,6 @@ func dmCookieSupportedFct() int { return int(C.dm_cookie_supported()) } -func dmLogInitVerboseFct(level int) { - C.dm_log_init_verbose(C.int(level)) -} - func logWithErrnoInitFct() { C.log_with_errno_init() } diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go index dc361eab..7f793c27 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -1,14 +1,11 @@ -// +build linux,!libdm_no_deferred_remove +// +build linux,cgo,!libdm_no_deferred_remove package devicemapper -/* -#cgo LDFLAGS: -L. -ldevmapper -#include -*/ +// #include import "C" -// LibraryDeferredRemovalSupport is supported when statically linked. +// LibraryDeferredRemovalSupport tells if the feature is enabled in the build const LibraryDeferredRemovalSupport = true func dmTaskDeferredRemoveFct(task *cdmTask) int { diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go new file mode 100644 index 00000000..7d845089 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build + +package devicemapper + +// #cgo pkg-config: devmapper +import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go index 4a6665de..a880fec8 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -1,8 +1,8 @@ -// +build linux,libdm_no_deferred_remove +// +build linux,cgo,libdm_no_deferred_remove package devicemapper -// LibraryDeferredRemovalsupport is not supported when statically linked. 
+// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
 const LibraryDeferredRemovalSupport = false

 func dmTaskDeferredRemoveFct(task *cdmTask) int {
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
new file mode 100644
index 00000000..cf7f26a4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
@@ -0,0 +1,6 @@
+// +build linux,cgo,static_build
+
+package devicemapper
+
+// #cgo pkg-config: --static devmapper
+import "C"
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
index 581b57eb..50ea7c48 100644
--- a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
@@ -1,15 +1,16 @@
-// +build linux
+// +build linux,cgo

 package devicemapper

 import (
-	"syscall"
 	"unsafe"
+
+	"golang.org/x/sys/unix"
 )

 func ioctlBlkGetSize64(fd uintptr) (int64, error) {
 	var size int64
-	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
+	if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
 		return 0, err
 	}
 	return size, nil
@@ -20,7 +21,7 @@ func ioctlBlkDiscard(fd uintptr, offset, length uint64) error {
 	r[0] = offset
 	r[1] = length

-	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 {
+	if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 {
 		return err
 	}
 	return nil
diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
new file mode 100644
index 00000000..7df7f3d4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
@@ -0,0 +1,20 @@
+// +build linux
+
+package dmesg
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Dmesg returns last messages from the kernel log, up to size bytes
+func Dmesg(size int) []byte {
+	t := uintptr(3) // SYSLOG_ACTION_READ_ALL
+	b := make([]byte, size)
+	amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
+	if err != 0 {
+		return []byte{}
+	}
+	return b[:amt]
+}
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
index 6f4a6e61..a129e654 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
@@ -13,98 +13,74 @@ import (
 	"github.com/sirupsen/logrus"
 )

-// exclusion returns true if the specified pattern is an exclusion
-func exclusion(pattern string) bool {
-	return pattern[0] == '!'
+// PatternMatcher allows checking paths against a list of patterns
+type PatternMatcher struct {
+	patterns   []*Pattern
+	exclusions bool
 }

-// empty returns true if the specified pattern is empty
-func empty(pattern string) bool {
-	return pattern == ""
-}
-
-// CleanPatterns takes a slice of patterns returns a new
-// slice of patterns cleaned with filepath.Clean, stripped
-// of any empty patterns and lets the caller know whether the
-// slice contains any exception patterns (prefixed with !).
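The removal of `CleanPatterns`/`OptimizedMatches` means callers now build a `PatternMatcher` once and reuse it across many paths, which also caches the compiled regexps. A migration sketch (patterns and paths are illustrative):

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	// Before: Matches(file, patterns) re-cleaned and re-parsed the pattern
	// list on every call. Now the matcher is built once up front...
	pm, err := fileutils.NewPatternMatcher([]string{"*.tmp", "build/", "!build/keep.log"})
	if err != nil {
		panic(err)
	}
	// ...and reused for every path; each Pattern caches its compiled regexp.
	for _, f := range []string{"a.tmp", "build/out.bin", "build/keep.log", "src/main.go"} {
		excluded, _ := pm.Matches(f)
		fmt.Printf("%-15s excluded=%v\n", f, excluded)
	}
}
```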
-func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { - // Loop over exclusion patterns and: - // 1. Clean them up. - // 2. Indicate whether we are dealing with any exception rules. - // 3. Error if we see a single exclusion marker on it's own (!). - cleanedPatterns := []string{} - patternDirs := [][]string{} - exceptions := false - for _, pattern := range patterns { +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { // Eliminate leading and trailing whitespace. - pattern = strings.TrimSpace(pattern) - if empty(pattern) { + p = strings.TrimSpace(p) + if p == "" { continue } - if exclusion(pattern) { - if len(pattern) == 1 { - return nil, nil, false, errors.New("Illegal exclusion pattern: !") + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") } - exceptions = true + newp.exclusion = true + p = p[1:] + pm.exclusions = true } - pattern = filepath.Clean(pattern) - cleanedPatterns = append(cleanedPatterns, pattern) - if exclusion(pattern) { - pattern = pattern[1:] + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err } - patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator))) + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) } - - return cleanedPatterns, patternDirs, exceptions, nil + return pm, nil } -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - patterns, patDirs, _, err := CleanPatterns(patterns) - if err != nil { - return false, err - } - - return OptimizedMatches(file, patterns, patDirs) -} - -// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. -// It will assume that the inputs have been preprocessed and therefore the function -// doesn't need to do as much error checking and clean-up. This was done to avoid -// repeating these steps on each file being checked during the archive process. -// The more generic fileutils.Matches() can't make these assumptions. -func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { +// Matches matches path against all the patterns. 
Matches is not safe to be +// called concurrently +func (pm *PatternMatcher) Matches(file string) (bool, error) { matched := false file = filepath.FromSlash(file) parentPath := filepath.Dir(file) parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - for i, pattern := range patterns { + for _, pattern := range pm.patterns { negative := false - if exclusion(pattern) { + if pattern.exclusion { negative = true - pattern = pattern[1:] } - match, err := regexpMatch(pattern, file) + match, err := pattern.match(file) if err != nil { - return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) + return false, err } if !match && parentPath != "." { // Check to see if the pattern matches one of our parent dirs. - if len(patDirs[i]) <= len(parentPathDirs) { - match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)), - strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator))) + if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) } } @@ -120,28 +96,49 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, return matched, nil } -// regexpMatch tries to match the logic of filepath.Match but -// does so using regexp logic. We do this so that we can expand the -// wildcard set to include other things, like "**" to mean any number -// of directories. This means that we should be backwards compatible -// with filepath.Match(). We'll end up supporting more stuff, due to -// the fact that we're using regexp, but that's ok - it does no harm. -// -// As per the comment in golangs filepath.Match, on Windows, escaping -// is disabled. Instead, '\\' is treated as path separator. -func regexpMatch(pattern, path string) (bool, error) { - regStr := "^" +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(pattern, path); err != nil { - return false, err +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used used to filter file paths. +type Pattern struct { + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + + if p.regexp == nil { + if err := p.compile(); err != nil { + return false, filepath.ErrBadPattern + } } + b := p.regexp.MatchString(path) + + return b, nil +} + +func (p *Pattern) compile() error { + regStr := "^" + pattern := p.cleanedPattern // Go through the pattern and convert it to a regexp. // We use a scanner so we can support utf-8 chars. 
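The scanner loop below is what gives `**` its .gitignore-style meaning: zero or more whole path segments. A small demonstration of the resulting semantics:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	pm, err := fileutils.NewPatternMatcher([]string{"docs/**/*.md"})
	if err != nil {
		panic(err)
	}
	// "**/" may swallow any number of directories, including none, so both a
	// direct child and a deeply nested file match.
	for _, f := range []string{"docs/a.md", "docs/x/y/z/b.md", "docs/a.txt"} {
		m, _ := pm.Matches(f)
		fmt.Printf("%-18s -> %v\n", f, m)
	}
}
```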
var scan scanner.Scanner @@ -161,17 +158,19 @@ func regexpMatch(pattern, path string) (bool, error) { // is some flavor of "**" scan.Next() + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + if scan.Peek() == scanner.EOF { // is "**EOF" - to align with .gitignore just accept all regStr += ".*" } else { // is "**" - regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" - } - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" } } else { // is "*" so map it to anything but "/" @@ -180,7 +179,7 @@ func regexpMatch(pattern, path string) (bool, error) { } else if ch == '?' { // "?" is any char except "/" regStr += "[^" + escSL + "]" - } else if strings.Index(".$", string(ch)) != -1 { + } else if ch == '.' || ch == '$' { // Escape some regexp special chars that have no meaning // in golang's filepath.Match regStr += `\` + string(ch) @@ -206,14 +205,30 @@ func regexpMatch(pattern, path string) (bool, error) { regStr += "$" - res, err := regexp.MatchString(regStr, path) - - // Map regexp's error to filepath's so no one knows we're not using filepath + re, err := regexp.Compile(regStr) if err != nil { - err = filepath.ErrBadPattern + return err } - return res, err + p.regexp = re + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. 
+ return false, nil + } + + return pm.Matches(file) } // CopyFile copies from src to dst until either EOF is reached diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go new file mode 100644 index 00000000..e6094b55 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go @@ -0,0 +1,88 @@ +// +build linux + +package fsutils + +import ( + "fmt" + "io/ioutil" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *unix.Dirent) bool { + visited++ + if ent.Type == unix.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*unix.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := unix.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*unix.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go new file mode 100644 index 00000000..c001fbec --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go @@ -0,0 +1,23 @@ +// +build linux + +package homedir + +import ( + "os" + + "github.com/containers/storage/pkg/idtools" +) + +// GetStatic returns the home directory for the current user without calling +// os/user.Current(). This is useful for static-linked binary on glibc-based +// system, because a call to os/user.Current() in a static binary leads to +// segfault due to a glibc issue that won't be fixed in a short term. +// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +func GetStatic() (string, error) { + uid := os.Getuid() + usr, err := idtools.LookupUID(uid) + if err != nil { + return "", err + } + return usr.Home, nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go new file mode 100644 index 00000000..6b96b856 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -0,0 +1,13 @@ +// +build !linux + +package homedir + +import ( + "errors" +) + +// GetStatic is not needed for non-linux systems. 
+// (Precisely, it is needed only for glibc-based linux systems.) +func GetStatic() (string, error) { + return "", errors.New("homedir.GetStatic() is not supported on this system") +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go similarity index 77% rename from vendor/github.com/containers/storage/pkg/homedir/homedir.go rename to vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 8154e83f..f2a20ea8 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -1,8 +1,9 @@ +// +build !windows + package homedir import ( "os" - "runtime" "github.com/opencontainers/runc/libcontainer/user" ) @@ -10,9 +11,6 @@ import ( // Key returns the env var name for the user's home dir based on // the platform being run on func Key() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } return "HOME" } @@ -21,7 +19,7 @@ func Key() string { // Returned path should be used with "path/filepath" to form new paths. func Get() string { home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { + if home == "" { if u, err := user.CurrentUser(); err == nil { return u.Home } @@ -32,8 +30,5 @@ func Get() string { // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } return "~" } diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go new file mode 100644 index 00000000..fafdb2bb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 6bca4662..68a072db 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -37,49 +37,56 @@ const ( // MkdirAllAs creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership to the requested uid/gid pair. +// Deprecated: Use MkdirAllAndChown func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { return mkdirAs(path, mode, ownerUID, ownerGID, true, true) } -// MkdirAllNewAs creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. 
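The deprecation notes above funnel callers toward the `IDPair`-based helpers (the removal of `MkdirAllNewAs` continues below). A usage sketch of the replacement, `MkdirAllAndChown`; the path and the 100000 base are illustrative, and chowning to a subuid range needs appropriate privileges:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Ownership for the remapped root of a user namespace; 100000 is a
	// typical /etc/subuid base and purely illustrative.
	pair := idtools.IDPair{UID: 100000, GID: 100000}

	// Creates every missing directory along the path and chowns all of them
	// to the pair (fails without privileges to chown outside your own ids).
	if err := idtools.MkdirAllAndChown("/tmp/demo/rootfs/etc", 0755, pair); err != nil {
		fmt.Println("mkdir failed:", err)
		return
	}
	fmt.Printf("created tree owned by %d:%d\n", pair.UID, pair.GID)
}
```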
If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, false) -} - // MkdirAs creates a directory and then modifies ownership to the requested uid/gid. // If the directory already exists, this function still changes ownership +// Deprecated: Use MkdirAndChown with a IDPair func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { return mkdirAs(path, mode, ownerUID, ownerGID, false, true) } +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, false) +} + // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. // If the maps are empty, then the root uid/gid will default to "real" 0/0 func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - var uid, gid int - - if uidMap != nil { - xUID, err := ToHost(0, uidMap) - if err != nil { - return -1, -1, err - } - uid = xUID + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err } - if gidMap != nil { - xGID, err := ToHost(0, gidMap) - if err != nil { - return -1, -1, err - } - gid = xGID + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err } return uid, gid, nil } -// ToContainer takes an id mapping, and uses it to translate a +// toContainer takes an id mapping, and uses it to translate a // host ID to the remapped ID. If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id -func ToContainer(hostID int, idMap []IDMap) (int, error) { +func toContainer(hostID int, idMap []IDMap) (int, error) { if idMap == nil { return hostID, nil } @@ -92,10 +99,10 @@ func ToContainer(hostID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) } -// ToHost takes an id mapping and a remapped ID, and translates the +// toHost takes an id mapping and a remapped ID, and translates the // ID to the mapped host ID. 
If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id # -func ToHost(contID int, idMap []IDMap) (int, error) { +func toHost(contID int, idMap []IDMap) (int, error) { if idMap == nil { return contID, nil } @@ -108,26 +115,101 @@ func ToHost(contID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) } -// CreateIDMappings takes a requested user and group name and +// IDPair is a UID and GID pair +type IDPair struct { + UID int + GID int +} + +// IDMappings contains a mappings of UIDs and GIDs +type IDMappings struct { + uids []IDMap + gids []IDMap +} + +// NewIDMappings takes a requested user and group name and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair -func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { +func NewIDMappings(username, groupname string) (*IDMappings, error) { subuidRanges, err := parseSubuid(username) if err != nil { - return nil, nil, err + return nil, err } subgidRanges, err := parseSubgid(groupname) if err != nil { - return nil, nil, err + return nil, err } if len(subuidRanges) == 0 { - return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) + return nil, fmt.Errorf("No subuid ranges found for user %q", username) } if len(subgidRanges) == 0 { - return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) } - return createIDMap(subuidRanges), createIDMap(subgidRanges), nil + return &IDMappings{ + uids: createIDMap(subuidRanges), + gids: createIDMap(subgidRanges), + }, nil +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { + return &IDMappings{uids: uids, gids: gids} +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IDMappings) RootPair() IDPair { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return IDPair{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid. 
+// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IDMappings) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) UIDs() []IDMap { + return i.uids +} + +// GIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) GIDs() []IDMap { + return i.gids } func createIDMap(subidRanges ranges) []IDMap { diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index b2cfb05e..b5870506 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -3,10 +3,21 @@ package idtools import ( + "bytes" + "fmt" + "io" "os" "path/filepath" + "strings" + "sync" "github.com/containers/storage/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string ) func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { @@ -18,11 +29,8 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { paths = []string{path} } else if err == nil && chownExisting { - if err := os.Chown(path, ownerUID, ownerGID); err != nil { - return err - } // short-circuit--we were called with an existing directory and chown was requested - return nil + return os.Chown(path, ownerUID, ownerGID) } else if err == nil { // nothing to do; directory path fully exists already and chown was NOT requested return nil @@ -41,7 +49,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown paths = append(paths, dirPath) } } - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { return err } } else { @@ -58,3 +66,139 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown } return nil } + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair IDPair) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + 
return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database 
unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go index 0cad1736..dbf6bc4c 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go @@ -11,8 +11,15 @@ import ( // Platforms such as Windows do not support the UID/GID concept. So make this // just a wrapper around system.MkdirAll. func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { return err } return nil } + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, pair IDPair) bool { + return true +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go index 4a4aaed0..9da7975e 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -2,8 +2,6 @@ package idtools import ( "fmt" - "os/exec" - "path/filepath" "regexp" "sort" "strconv" @@ -33,23 +31,6 @@ var ( userMod = "usermod" ) -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - //only return no error if the final resolved binary basename - //matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - // AddNamespaceRangesUser takes a username and uses the standard system // utility to create a system user/group pair used to hold the // /etc/sub{uid,gid} ranges which will be used for user namespace @@ -181,8 +162,3 @@ func wouldOverlap(arange subIDRange, ID int) bool { } return false } - -func execCmd(cmd, args string) ([]byte, error) { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
- return execCmd.CombinedOutput() -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go new file mode 100644 index 00000000..9703ecbd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) + return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go new file mode 100644 index 00000000..72a04f34 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go @@ -0,0 +1,186 @@ +package ioutils + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. 
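From the consumer side (the Write implementation follows), the contract is write-then-read-until-EOF, with each chunk readable exactly once. A usage sketch, assuming the vendored package is importable:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	// Producer: writes land in pooled fixed-size buffers, in order.
	go func() {
		bp.Write([]byte("hello, "))
		bp.Write([]byte("pipe"))
		bp.Close() // once drained, reads end with io.EOF
	}()

	// Consumer: read until the pipe reports closure.
	buf := make([]byte, 32)
	for {
		n, err := bp.Read(buf)
		fmt.Print(string(buf[:n]))
		if err != nil {
			break
		}
	}
	fmt.Println()
}
```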
+func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fmt.go b/vendor/github.com/containers/storage/pkg/ioutils/fmt.go deleted file mode 100644 index 0b04b0ba..00000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/fmt.go +++ /dev/null @@ -1,22 +0,0 @@ -package ioutils - -import ( - "fmt" - "io" -) - -// FprintfIfNotEmpty prints the string value if it's not empty -func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { - if value != "" { - return fmt.Fprintf(w, format, value) - } - return 0, nil -} - -// FprintfIfTrue prints the boolean value if it's true -func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { - if ok { - return fmt.Fprintf(w, format, ok) - } - return 0, nil -} diff --git 
a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go index 6dc50a03..a56c4626 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go @@ -80,3 +80,83 @@ func (w *atomicFileWriter) Close() (retErr error) { } return nil } + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. +type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. 
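Together, these methods (String's body follows) support all-or-nothing publication of a directory: stage the writes, then Commit onto the target or Cancel on failure. A usage sketch; the `bundle` target is illustrative and must not exist beforehand:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	// Stage in the system temp dir; files become visible only on Commit.
	ws, err := ioutils.NewAtomicWriteSet("")
	if err != nil {
		panic(err)
	}
	committed := false
	defer func() {
		if !committed {
			ws.Cancel() // throw away the staging directory on any failure
		}
	}()

	if err := ws.WriteFile("config.json", []byte(`{"a":1}`), 0644); err != nil {
		panic(err)
	}
	if err := ws.WriteFile("manifest", []byte("v1\n"), 0644); err != nil {
		panic(err)
	}

	// Commit renames the staging dir onto the target, so both files appear
	// at the same time.
	if err := ws.Commit("bundle"); err != nil {
		panic(err)
	}
	committed = true

	entries, _ := ioutil.ReadDir("bundle")
	fmt.Println("committed", len(entries), "files atomically")
}
```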
+func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/multireader.go b/vendor/github.com/containers/storage/pkg/ioutils/multireader.go deleted file mode 100644 index 0d2d76b4..00000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/multireader.go +++ /dev/null @@ -1,226 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "os" -) - -type pos struct { - idx int - offset int64 -} - -type multiReadSeeker struct { - readers []io.ReadSeeker - pos *pos - posIdx map[io.ReadSeeker]int -} - -func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { - var tmpOffset int64 - switch whence { - case os.SEEK_SET: - for i, rdr := range r.readers { - // get size of the current reader - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - if offset > tmpOffset+s { - if i == len(r.readers)-1 { - rdrOffset := s + (offset - tmpOffset) - if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { - return -1, err - } - r.pos = &pos{i, rdrOffset} - return offset, nil - } - - tmpOffset += s - continue - } - - rdrOffset := offset - tmpOffset - idx := i - - rdr.Seek(rdrOffset, os.SEEK_SET) - // make sure all following readers are at 0 - for _, rdr := range r.readers[i+1:] { - rdr.Seek(0, os.SEEK_SET) - } - - if rdrOffset == s && i != len(r.readers)-1 { - idx++ - rdrOffset = 0 - } - r.pos = &pos{idx, rdrOffset} - return offset, nil - } - case os.SEEK_END: - for _, rdr := range r.readers { - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - tmpOffset += s - } - r.Seek(tmpOffset+offset, os.SEEK_SET) - return tmpOffset + offset, nil - case os.SEEK_CUR: - if r.pos == nil { - return r.Seek(offset, os.SEEK_SET) - } - // Just return the current offset - if offset == 0 { - return r.getCurOffset() - } - - curOffset, err := r.getCurOffset() - if err != nil { - return -1, err - } - rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) - if err != nil { - return -1, err - } - - r.pos = &pos{r.posIdx[rdr], rdrOffset} - return curOffset + offset, nil - default: - return -1, fmt.Errorf("Invalid whence: %d", whence) - } - - return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) -} - -func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { - var rdr io.ReadSeeker - var rdrOffset int64 - - for i, rdr := range r.readers { - offsetTo, err := r.getOffsetToReader(rdr) - if err != nil { - return nil, -1, err - } - if offsetTo > offset { - rdr = r.readers[i-1] - rdrOffset = offsetTo - offset - break - } - - if rdr == r.readers[len(r.readers)-1] { - rdrOffset = offsetTo + offset - break - } - } - - return rdr, rdrOffset, nil -} - -func (r *multiReadSeeker) getCurOffset() (int64, error) { - var totalSize int64 - for _, rdr := range r.readers[:r.pos.idx+1] { - if r.posIdx[rdr] == r.pos.idx { - totalSize += r.pos.offset - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, fmt.Errorf("error getting seeker size: %v", err) - } - totalSize += size - } - return totalSize, nil -} - -func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { - var offset int64 - for _, r := range r.readers { - if r == rdr { - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, err - } - offset += size - } - return offset, nil -} - -func (r *multiReadSeeker) Read(b []byte) (int, error) { - if r.pos == nil { - r.pos = &pos{0, 0} - } - - bCap := int64(cap(b)) - buf 
:= bytes.NewBuffer(nil) - var rdr io.ReadSeeker - - for _, rdr = range r.readers[r.pos.idx:] { - readBytes, err := io.CopyN(buf, rdr, bCap) - if err != nil && err != io.EOF { - return -1, err - } - bCap -= readBytes - - if bCap == 0 { - break - } - } - - rdrPos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - r.pos = &pos{r.posIdx[rdr], rdrPos} - return buf.Read(b) -} - -func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { - // save the current position - pos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - - // get the size - size, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - // reset the position - if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { - return -1, err - } - return size, nil -} - -// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided -// input readseekers. After calling this method the initial position is set to the -// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances -// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. -// Seek can be used over the sum of lengths of all readseekers. -// -// When a MultiReadSeeker is used, no Read and Seek operations should be made on -// its ReadSeeker components. Also, users should make no assumption on the state -// of individual readseekers while the MultiReadSeeker is used. -func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { - if len(readers) == 1 { - return readers[0] - } - idx := make(map[io.ReadSeeker]int) - for i, rdr := range readers { - idx[rdr] = i - } - return &multiReadSeeker{ - readers: readers, - posIdx: idx, - } -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go index 5a61e6bd..63f3c07f 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/readers.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/readers.go @@ -4,6 +4,8 @@ import ( "crypto/sha256" "encoding/hex" "io" + + "golang.org/x/net/context" ) type readCloserWrapper struct { @@ -81,3 +83,72 @@ func (r *OnEOFReader) runFunc() { r.Fn = nil } } + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. 
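+// If the context passed to NewCancelReadCloser has been cancelled, subsequent
+// calls to Read return the context's error.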
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+	return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+	p.pW.CloseWithError(err)
+	p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+	p.closeWithError(io.EOF)
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/locker/README.md b/vendor/github.com/containers/storage/pkg/locker/README.md
new file mode 100644
index 00000000..ad15e89a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/locker/README.md
@@ -0,0 +1,65 @@
+Locker
+======
+
+locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation is similar to a sync.Mutex; however, the user must provide
+a reference name to identify the underlying lock when locking and unlocking,
+and Unlock may return an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+
+
+## Usage
+
+```go
+package important
+
+import (
+	"sync"
+	"time"
+
+	"github.com/containers/storage/pkg/locker"
+)
+
+type important struct {
+	locks *locker.Locker
+	data  map[string]interface{}
+	mu    sync.Mutex
+}
+
+func (i *important) Get(name string) interface{} {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+	return i.data[name]
+}
+
+func (i *important) Create(name string, data interface{}) {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+
+	i.createImportant(data)
+
+	i.mu.Lock()
+	i.data[name] = data
+	i.mu.Unlock()
+}
+
+func (i *important) createImportant(data interface{}) {
+	time.Sleep(10 * time.Second)
+}
+```
+
+For functions dealing with a given name, always lock at the beginning of the
+function (or before doing anything with the underlying state); this ensures
+that any other function dealing with the same name will block.
+
+When needing to modify the underlying data, use the global lock to ensure nothing
+else is modifying it at the same time.
+Since the name lock is already in place, no reads will occur while the
+modification is being performed.
+
diff --git a/vendor/github.com/containers/storage/pkg/locker/locker.go b/vendor/github.com/containers/storage/pkg/locker/locker.go
new file mode 100644
index 00000000..0b22ddfa
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/locker/locker.go
@@ -0,0 +1,121 @@
+/*
+Package locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation is similar to a sync.Mutex; however, the user must provide
+a reference name to identify the underlying lock when locking and unlocking,
+and Unlock may return an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
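+
+A minimal usage sketch (the caller code below is hypothetical, shown only
+for illustration):
+
+	l := locker.New()
+	l.Lock("instance-1")
+	// ... work guarded by the per-name lock ...
+	if err := l.Unlock("instance-1"); err != nil {
+		// ErrNoSuchLock is returned for names that were never locked.
+	}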
+*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go index f166cb2f..5f76f331 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -45,4 +45,5 @@ const ( RELATIME = 0 REMOUNT = 0 STRICTATIME = 0 + mntDetach = 0 ) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go index dc696dce..0425d0dd 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go @@ -1,85 +1,87 @@ package mount import ( - "syscall" + "golang.org/x/sys/unix" ) const ( // RDONLY will mount the file system read-only. - RDONLY = syscall.MS_RDONLY + RDONLY = unix.MS_RDONLY // NOSUID will not allow set-user-identifier or set-group-identifier bits to // take effect. - NOSUID = syscall.MS_NOSUID + NOSUID = unix.MS_NOSUID // NODEV will not interpret character or block special devices on the file // system. 
- NODEV = syscall.MS_NODEV + NODEV = unix.MS_NODEV // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = syscall.MS_NOEXEC + NOEXEC = unix.MS_NOEXEC // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = syscall.MS_SYNCHRONOUS + SYNCHRONOUS = unix.MS_SYNCHRONOUS // DIRSYNC will force all directory updates within the file system to be done // synchronously. This affects the following system calls: create, link, // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = syscall.MS_DIRSYNC + DIRSYNC = unix.MS_DIRSYNC // REMOUNT will attempt to remount an already-mounted file system. This is // commonly used to change the mount flags for a file system, especially to // make a readonly file system writeable. It does not change device or mount // point. - REMOUNT = syscall.MS_REMOUNT + REMOUNT = unix.MS_REMOUNT // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = syscall.MS_MANDLOCK + MANDLOCK = unix.MS_MANDLOCK // NOATIME will not update the file access time when reading from a file. - NOATIME = syscall.MS_NOATIME + NOATIME = unix.MS_NOATIME // NODIRATIME will not update the directory access time. - NODIRATIME = syscall.MS_NODIRATIME + NODIRATIME = unix.MS_NODIRATIME // BIND remounts a subtree somewhere else. - BIND = syscall.MS_BIND + BIND = unix.MS_BIND // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = syscall.MS_BIND | syscall.MS_REC + RBIND = unix.MS_BIND | unix.MS_REC // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = syscall.MS_UNBINDABLE + UNBINDABLE = unix.MS_UNBINDABLE // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = syscall.MS_PRIVATE + PRIVATE = unix.MS_PRIVATE // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC // SLAVE creates a mount which receives propagation from its master, but not // vice versa. - SLAVE = syscall.MS_SLAVE + SLAVE = unix.MS_SLAVE // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + RSLAVE = unix.MS_SLAVE | unix.MS_REC // SHARED creates a mount which provides the ability to create mirrors of // that mount such that mounts and unmounts within any of the mirrors // propagate to the other mirrors. - SHARED = syscall.MS_SHARED + SHARED = unix.MS_SHARED // RSHARED marks the entire mount tree as SHARED. - RSHARED = syscall.MS_SHARED | syscall.MS_REC + RSHARED = unix.MS_SHARED | unix.MS_REC // RELATIME updates inode access times relative to modify or change time. - RELATIME = syscall.MS_RELATIME + RELATIME = unix.MS_RELATIME // STRICTATIME allows to explicitly request full atime updates. This makes // it possible for the kernel to default to relatime or noatime but still // allow userspace to override it. 
-	STRICTATIME = syscall.MS_STRICTATIME
+	STRICTATIME = unix.MS_STRICTATIME
+
+	mntDetach = unix.MNT_DETACH
 )
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
index 5564f7b3..9ed741e3 100644
--- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
@@ -27,4 +27,5 @@ const (
 	STRICTATIME = 0
 	SYNCHRONOUS = 0
 	RDONLY      = 0
+	mntDetach   = 0
 )
diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go
index 66ac4bf4..d3caa16b 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mount.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mount.go
@@ -1,7 +1,11 @@
 package mount
 
 import (
+	"sort"
+	"strings"
 	"time"
+
+	"github.com/containers/storage/pkg/fileutils"
 )
 
 // GetMounts retrieves a list of mounts for the current running process.
@@ -17,6 +21,10 @@ func Mounted(mountpoint string) (bool, error) {
 		return false, err
 	}
 
+	mountpoint, err = fileutils.ReadSymlinkedDirectory(mountpoint)
+	if err != nil {
+		return false, err
+	}
 	// Search the table for the mountpoint
 	for _, e := range entries {
 		if e.Mountpoint == mountpoint {
@@ -46,13 +54,11 @@ func Mount(device, target, mType, options string) error {
 // flags.go for supported option flags.
 func ForceMount(device, target, mType, options string) error {
 	flag, data := parseOptions(options)
-	if err := mount(device, target, mType, uintptr(flag), data); err != nil {
-		return err
-	}
-	return nil
+	return mount(device, target, mType, uintptr(flag), data)
 }
 
-// Unmount will unmount the target filesystem, so long as it is mounted.
+// Unmount lazily unmounts a filesystem on supported platforms, otherwise
+// does a normal unmount.
 func Unmount(target string) error {
 	if mounted, err := Mounted(target); err != nil || !mounted {
 		return err
@@ -60,6 +66,32 @@ func Unmount(target string) error {
 	return ForceUnmount(target)
 }
 
+// RecursiveUnmount unmounts the target and all mounts underneath, starting with
+// the deepest mount first.
+func RecursiveUnmount(target string) error {
+	mounts, err := GetMounts()
+	if err != nil {
+		return err
+	}
+
+	// Make the deepest mount be first
+	sort.Sort(sort.Reverse(byMountpoint(mounts)))
+
+	for i, m := range mounts {
+		if !strings.HasPrefix(m.Mountpoint, target) {
+			continue
+		}
+		if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
+			if mounted, err := Mounted(m.Mountpoint); err != nil || mounted {
+				return err
+			}
+			// Ignore errors for submounts and continue trying to unmount others;
+			// the final unmount should fail if there are any submounts remaining
+		}
+	}
+	return nil
+}
+
 // ForceUnmount will force an unmount of the target filesystem, regardless if
 // it is mounted or not.
 func ForceUnmount(target string) (err error) {
@@ -70,5 +102,5 @@ func ForceUnmount(target string) (err error) {
 		}
 		time.Sleep(100 * time.Millisecond)
 	}
-	return
+	return nil
 }
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
index bb870e6f..814896cc 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -13,8 +13,9 @@ import "C"
 import (
 	"fmt"
 	"strings"
-	"syscall"
 	"unsafe"
+
+	"golang.org/x/sys/unix"
 )
 
 func allocateIOVecs(options []string) []C.struct_iovec {
@@ -55,5 +56,5 @@ func mount(device, target, mType string, flag uintptr, data string) error {
 }
 
 func unmount(target string, flag int) error {
-	return syscall.Unmount(target, flag)
+	return unix.Unmount(target, flag)
 }
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
index dd4280c7..39c36d47 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
@@ -1,21 +1,59 @@
 package mount
 
 import (
-	"syscall"
+	"golang.org/x/sys/unix"
 )
 
-func mount(device, target, mType string, flag uintptr, data string) error {
-	if err := syscall.Mount(device, target, mType, flag, data); err != nil {
-		return err
+const (
+	// ptypes is the set of propagation types.
+	ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
+
+	// pflags is the full set of valid flags for a change-propagation call.
+	pflags = ptypes | unix.MS_REC | unix.MS_SILENT
+
+	// broflags is the combination of bind and read-only.
+	broflags = unix.MS_BIND | unix.MS_RDONLY
+)
+
+// isremount returns true if either device name or flags identify a remount request, false otherwise.
+func isremount(device string, flags uintptr) bool {
+	switch {
+	// We treat device "" and "none" as a remount request to provide compatibility with
+	// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
+	case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
+		return true
+	default:
+		return false
+	}
+}
+
+func mount(device, target, mType string, flags uintptr, data string) error {
+	oflags := flags &^ ptypes
+	if !isremount(device, flags) || data != "" {
+		// Initial call applying all non-propagation flags for mount
+		// or remount with changed data
+		if err := unix.Mount(device, target, mType, oflags, data); err != nil {
+			return err
+		}
 	}
-	// If we have a bind mount or remount, remount...
-	if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
-		return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
+	if flags&ptypes != 0 {
+		// Change the propagation type.
+		if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
+			return err
+		}
 	}
+
+	if oflags&broflags == broflags {
+		// Remount the bind to apply read only.
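+		// (The kernel ignores MS_RDONLY on the initial bind, so the bind
+		// mount is created first and then remounted to apply read-only.)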
+ return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") + } + return nil } func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) + return unix.Unmount(target, flag) } diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go index c684aa81..48b86771 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go @@ -3,8 +3,9 @@ package mount import ( - "golang.org/x/sys/unix" "unsafe" + + "golang.org/x/sys/unix" ) // #include diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go index e3fc3535..ff4cc1d8 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go @@ -38,3 +38,17 @@ type Info struct { // VfsOpts represents per super block options. VfsOpts string } + +type byMountpoint []*Info + +func (by byMountpoint) Len() int { + return len(by) +} + +func (by byMountpoint) Less(i, j int) bool { + return by[i].Mountpoint < by[j].Mountpoint +} + +func (by byMountpoint) Swap(i, j int) { + by[i], by[j] = by[j], by[i] +} diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go new file mode 100644 index 00000000..09f6b03c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go @@ -0,0 +1,58 @@ +// +build solaris + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. 
+func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + // TODO: Solaris does not support bind mounts. + // Evaluate lofs and also look at the relevant + // mount flags to be supported. + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go index 54a89d28..76e1e499 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd solaris +// +build linux freebsd solaris openbsd // Package kernel provides helper function to get, parse and compare kernel // versions for different platforms. @@ -6,6 +6,8 @@ package kernel import ( "bytes" + + "github.com/sirupsen/logrus" ) // GetKernelVersion gets the current kernel version. @@ -28,3 +30,16 @@ func GetKernelVersion() (*VersionInfo, error) { return ParseRelease(string(release)) } + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version. +func CheckKernelVersion(k, major, minor int) bool { + if v, err := GetKernelVersion(); err != nil { + logrus.Warnf("error getting kernel version: %s", err) + } else { + if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go index 80fab8ff..e5986727 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go @@ -4,8 +4,9 @@ package kernel import ( "fmt" - "syscall" "unsafe" + + "golang.org/x/sys/windows" ) // VersionInfo holds information about the kernel. @@ -24,28 +25,28 @@ func (k *VersionInfo) String() string { func GetKernelVersion() (*VersionInfo, error) { var ( - h syscall.Handle + h windows.Handle dwVersion uint32 err error ) KVI := &VersionInfo{"Unknown", 0, 0, 0} - if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, - syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, + windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), 0, - syscall.KEY_READ, + windows.KEY_READ, &h); err != nil { return KVI, err } - defer syscall.RegCloseKey(h) + defer windows.RegCloseKey(h) var buf [1 << 10]uint16 var typ uint32 n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - if err = syscall.RegQueryValueEx(h, - syscall.StringToUTF16Ptr("BuildLabEx"), + if err = windows.RegQueryValueEx(h, + windows.StringToUTF16Ptr("BuildLabEx"), nil, &typ, (*byte)(unsafe.Pointer(&buf[0])), @@ -53,11 +54,11 @@ func GetKernelVersion() (*VersionInfo, error) { return KVI, err } - KVI.kvi = syscall.UTF16ToString(buf[:]) + KVI.kvi = windows.UTF16ToString(buf[:]) // Important - docker.exe MUST be manifested for this API to return // the correct information. 
- if dwVersion, err = syscall.GetVersion(); err != nil { + if dwVersion, err = windows.GetVersion(); err != nil { return KVI, err } diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go index bb9b3264..e913fad0 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go @@ -1,18 +1,16 @@ package kernel -import ( - "syscall" -) +import "golang.org/x/sys/unix" // Utsname represents the system name structure. -// It is passthrough for syscall.Utsname in order to make it portable with +// It is passthrough for unix.Utsname in order to make it portable with // other platforms where it is not available. -type Utsname syscall.Utsname +type Utsname unix.Utsname -func uname() (*syscall.Utsname, error) { - uts := &syscall.Utsname{} +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} - if err := syscall.Uname(uts); err != nil { + if err := unix.Uname(uts); err != nil { return nil, err } return uts, nil diff --git a/vendor/github.com/containers/storage/pkg/plugins/client.go b/vendor/github.com/containers/storage/pkg/plugins/client.go deleted file mode 100644 index b4c31c05..00000000 --- a/vendor/github.com/containers/storage/pkg/plugins/client.go +++ /dev/null @@ -1,188 +0,0 @@ -package plugins - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/containers/storage/pkg/plugins/transport" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" -) - -const ( - defaultTimeOut = 30 -) - -// NewClient creates a new plugin client (http). -func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { - tr := &http.Transport{} - - if tlsConfig != nil { - c, err := tlsconfig.Client(*tlsConfig) - if err != nil { - return nil, err - } - tr.TLSClientConfig = c - } - - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - socket := u.Host - if socket == "" { - // valid local socket addresses have the host empty. - socket = u.Path - } - if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { - return nil, err - } - scheme := httpScheme(u) - - clientTransport := transport.NewHTTPTransport(tr, scheme, socket) - return NewClientWithTransport(clientTransport), nil -} - -// NewClientWithTransport creates a new plugin client with a given transport. -func NewClientWithTransport(tr transport.Transport) *Client { - return &Client{ - http: &http.Client{ - Transport: tr, - }, - requestFactory: tr, - } -} - -// Client represents a plugin client. -type Client struct { - http *http.Client // http client to use - requestFactory transport.RequestFactory -} - -// Call calls the specified method with the specified arguments for the plugin. -// It will retry for 30 seconds if a failure occurs when calling. 
-func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { - var buf bytes.Buffer - if args != nil { - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return err - } - } - body, err := c.callWithRetry(serviceMethod, &buf, true) - if err != nil { - return err - } - defer body.Close() - if ret != nil { - if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) - return err - } - } - return nil -} - -// Stream calls the specified method with the specified arguments for the plugin and returns the response body -func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return nil, err - } - return c.callWithRetry(serviceMethod, &buf, true) -} - -// SendFile calls the specified method, and passes through the IO stream -func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { - body, err := c.callWithRetry(serviceMethod, data, true) - if err != nil { - return err - } - defer body.Close() - if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) - return err - } - return nil -} - -func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { - req, err := c.requestFactory.NewRequest(serviceMethod, data) - if err != nil { - return nil, err - } - - var retries int - start := time.Now() - - for { - resp, err := c.http.Do(req) - if err != nil { - if !retry { - return nil, err - } - - timeOff := backoff(retries) - if abort(start, timeOff) { - return nil, err - } - retries++ - logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) - time.Sleep(timeOff) - continue - } - - if resp.StatusCode != http.StatusOK { - b, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} - } - - // Plugins' Response(s) should have an Err field indicating what went - // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just - // return the string(body) - type responseErr struct { - Err string - } - remoteErr := responseErr{} - if err := json.Unmarshal(b, &remoteErr); err == nil { - if remoteErr.Err != "" { - return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} - } - } - // old way... 
- return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} - } - return resp.Body, nil - } -} - -func backoff(retries int) time.Duration { - b, max := 1, defaultTimeOut - for b < max && retries > 0 { - b *= 2 - retries-- - } - if b > max { - b = max - } - return time.Duration(b) * time.Second -} - -func abort(start time.Time, timeOff time.Duration) bool { - return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second -} - -func httpScheme(u *url.URL) string { - scheme := u.Scheme - if scheme != "https" { - scheme = "http" - } - return scheme -} diff --git a/vendor/github.com/containers/storage/pkg/plugins/discovery.go b/vendor/github.com/containers/storage/pkg/plugins/discovery.go deleted file mode 100644 index 4cb5a1a3..00000000 --- a/vendor/github.com/containers/storage/pkg/plugins/discovery.go +++ /dev/null @@ -1,132 +0,0 @@ -package plugins - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/url" - "os" - "path/filepath" - "strings" - "sync" -) - -var ( - // ErrNotFound plugin not found - ErrNotFound = errors.New("plugin not found") - socketsPath = "/run/containers/storage/plugins" - specsPaths = []string{"/etc/containers/storage/plugins", "/usr/lib/containers/storage/plugins"} -) - -// localRegistry defines a registry that is local (using unix socket). -type localRegistry struct{} - -func newLocalRegistry() localRegistry { - return localRegistry{} -} - -// Scan scans all the plugin paths and returns all the names it found -func Scan() ([]string, error) { - var names []string - if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return nil - } - - if fi.Mode()&os.ModeSocket != 0 { - name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) - names = append(names, name) - } - return nil - }); err != nil { - return nil, err - } - - for _, path := range specsPaths { - if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { - if err != nil || fi.IsDir() { - return nil - } - name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) - names = append(names, name) - return nil - }); err != nil { - return nil, err - } - } - return names, nil -} - -// Plugin returns the plugin registered with the given name (or returns an error). -func (l *localRegistry) Plugin(name string) (*Plugin, error) { - socketpaths := pluginPaths(socketsPath, name, ".sock") - - for _, p := range socketpaths { - if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { - return NewLocalPlugin(name, "unix://"+p), nil - } - } - - var txtspecpaths []string - for _, p := range specsPaths { - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
- } - - for _, p := range txtspecpaths { - if _, err := os.Stat(p); err == nil { - if strings.HasSuffix(p, ".json") { - return readPluginJSONInfo(name, p) - } - return readPluginInfo(name, p) - } - } - return nil, ErrNotFound -} - -func readPluginInfo(name, path string) (*Plugin, error) { - content, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - addr := strings.TrimSpace(string(content)) - - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - - if len(u.Scheme) == 0 { - return nil, fmt.Errorf("Unknown protocol") - } - - return NewLocalPlugin(name, addr), nil -} - -func readPluginJSONInfo(name, path string) (*Plugin, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - var p Plugin - if err := json.NewDecoder(f).Decode(&p); err != nil { - return nil, err - } - p.name = name - if len(p.TLSConfig.CAFile) == 0 { - p.TLSConfig.InsecureSkipVerify = true - } - p.activateWait = sync.NewCond(&sync.Mutex{}) - - return &p, nil -} - -func pluginPaths(base, name, ext string) []string { - return []string{ - filepath.Join(base, name+ext), - filepath.Join(base, name, name+ext), - } -} diff --git a/vendor/github.com/containers/storage/pkg/plugins/errors.go b/vendor/github.com/containers/storage/pkg/plugins/errors.go deleted file mode 100644 index 79884710..00000000 --- a/vendor/github.com/containers/storage/pkg/plugins/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -package plugins - -import ( - "fmt" - "net/http" -) - -type statusError struct { - status int - method string - err string -} - -// Error returns a formatted string for this error type -func (e *statusError) Error() string { - return fmt.Sprintf("%s: %v", e.method, e.err) -} - -// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin -func IsNotFound(err error) bool { - return isStatusError(err, http.StatusNotFound) -} - -func isStatusError(err error, status int) bool { - if err == nil { - return false - } - e, ok := err.(*statusError) - if !ok { - return false - } - return e.status == status -} diff --git a/vendor/github.com/containers/storage/pkg/plugins/plugins.go b/vendor/github.com/containers/storage/pkg/plugins/plugins.go deleted file mode 100644 index e197c3fd..00000000 --- a/vendor/github.com/containers/storage/pkg/plugins/plugins.go +++ /dev/null @@ -1,271 +0,0 @@ -// Package plugins provides structures and helper functions to manage Docker -// plugins. -// -// Storage discovers plugins by looking for them in the plugin directory whenever -// a user or container tries to use one by name. UNIX domain socket files must -// be located under /run/containers/storage/plugins, whereas spec files can be -// located either under /etc/containers/storage/plugins or -// /usr/lib/containers/storage/plugins. This is handled by the Registry -// interface, which lets you list all plugins or get a plugin by its name if it -// exists. -// -// The plugins need to implement an HTTP server and bind this to the UNIX socket -// or the address specified in the spec files. -// A handshake is send at /Plugin.Activate, and plugins are expected to return -// a Manifest with a list of subsystems which this plugin implements. As of -// this writing, the known subsystem is "GraphDriver". -// -// In order to use a plugins, you can use the ``Get`` with the name of the -// plugin and the subsystem it implements. 
-// -// plugin, err := plugins.Get("example", "VolumeDriver") -// if err != nil { -// return fmt.Errorf("Error looking up volume plugin example: %v", err) -// } -package plugins - -import ( - "errors" - "sync" - "time" - - "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" -) - -var ( - // ErrNotImplements is returned if the plugin does not implement the requested driver. - ErrNotImplements = errors.New("Plugin does not implement the requested driver") -) - -type plugins struct { - sync.Mutex - plugins map[string]*Plugin -} - -var ( - storage = plugins{plugins: make(map[string]*Plugin)} - extpointHandlers = make(map[string]func(string, *Client)) -) - -// Manifest lists what a plugin implements. -type Manifest struct { - // List of subsystem the plugin implements. - Implements []string -} - -// Plugin is the definition of a storage plugin. -type Plugin struct { - // Name of the plugin - name string - // Address of the plugin - Addr string - // TLS configuration of the plugin - TLSConfig *tlsconfig.Options - // Client attached to the plugin - client *Client - // Manifest of the plugin (see above) - Manifest *Manifest `json:"-"` - - // error produced by activation - activateErr error - // specifies if the activation sequence is completed (not if it is successful or not) - activated bool - // wait for activation to finish - activateWait *sync.Cond -} - -// Name returns the name of the plugin. -func (p *Plugin) Name() string { - return p.name -} - -// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. -func (p *Plugin) Client() *Client { - return p.client -} - -// NewLocalPlugin creates a new local plugin. -func NewLocalPlugin(name, addr string) *Plugin { - return &Plugin{ - name: name, - Addr: addr, - // TODO: change to nil - TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, - activateWait: sync.NewCond(&sync.Mutex{}), - } -} - -func (p *Plugin) activate() error { - p.activateWait.L.Lock() - if p.activated { - p.activateWait.L.Unlock() - return p.activateErr - } - - p.activateErr = p.activateWithLock() - p.activated = true - - p.activateWait.L.Unlock() - p.activateWait.Broadcast() - return p.activateErr -} - -func (p *Plugin) activateWithLock() error { - c, err := NewClient(p.Addr, p.TLSConfig) - if err != nil { - return err - } - p.client = c - - m := new(Manifest) - if err = p.client.Call("Plugin.Activate", nil, m); err != nil { - return err - } - - p.Manifest = m - - for _, iface := range m.Implements { - handler, handled := extpointHandlers[iface] - if !handled { - continue - } - handler(p.name, p.client) - } - return nil -} - -func (p *Plugin) waitActive() error { - p.activateWait.L.Lock() - for !p.activated { - p.activateWait.Wait() - } - p.activateWait.L.Unlock() - return p.activateErr -} - -func (p *Plugin) implements(kind string) bool { - if err := p.waitActive(); err != nil { - return false - } - for _, driver := range p.Manifest.Implements { - if driver == kind { - return true - } - } - return false -} - -func load(name string) (*Plugin, error) { - return loadWithRetry(name, true) -} - -func loadWithRetry(name string, retry bool) (*Plugin, error) { - registry := newLocalRegistry() - start := time.Now() - - var retries int - for { - pl, err := registry.Plugin(name) - if err != nil { - if !retry { - return nil, err - } - - timeOff := backoff(retries) - if abort(start, timeOff) { - return nil, err - } - retries++ - logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) - time.Sleep(timeOff) - 
continue - } - - storage.Lock() - storage.plugins[name] = pl - storage.Unlock() - - err = pl.activate() - - if err != nil { - storage.Lock() - delete(storage.plugins, name) - storage.Unlock() - } - - return pl, err - } -} - -func get(name string) (*Plugin, error) { - storage.Lock() - pl, ok := storage.plugins[name] - storage.Unlock() - if ok { - return pl, pl.activate() - } - return load(name) -} - -// Get returns the plugin given the specified name and requested implementation. -func Get(name, imp string) (*Plugin, error) { - pl, err := get(name) - if err != nil { - return nil, err - } - if pl.implements(imp) { - logrus.Debugf("%s implements: %s", name, imp) - return pl, nil - } - return nil, ErrNotImplements -} - -// Handle adds the specified function to the extpointHandlers. -func Handle(iface string, fn func(string, *Client)) { - extpointHandlers[iface] = fn -} - -// GetAll returns all the plugins for the specified implementation -func GetAll(imp string) ([]*Plugin, error) { - pluginNames, err := Scan() - if err != nil { - return nil, err - } - - type plLoad struct { - pl *Plugin - err error - } - - chPl := make(chan *plLoad, len(pluginNames)) - var wg sync.WaitGroup - for _, name := range pluginNames { - if pl, ok := storage.plugins[name]; ok { - chPl <- &plLoad{pl, nil} - continue - } - - wg.Add(1) - go func(name string) { - defer wg.Done() - pl, err := loadWithRetry(name, false) - chPl <- &plLoad{pl, err} - }(name) - } - - wg.Wait() - close(chPl) - - var out []*Plugin - for pl := range chPl { - if pl.err != nil { - logrus.Error(pl.err) - continue - } - if pl.pl.implements(imp) { - out = append(out, pl.pl) - } - } - return out, nil -} diff --git a/vendor/github.com/containers/storage/pkg/plugins/transport/http.go b/vendor/github.com/containers/storage/pkg/plugins/transport/http.go deleted file mode 100644 index 5be146af..00000000 --- a/vendor/github.com/containers/storage/pkg/plugins/transport/http.go +++ /dev/null @@ -1,36 +0,0 @@ -package transport - -import ( - "io" - "net/http" -) - -// httpTransport holds an http.RoundTripper -// and information about the scheme and address the transport -// sends request to. -type httpTransport struct { - http.RoundTripper - scheme string - addr string -} - -// NewHTTPTransport creates a new httpTransport. -func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { - return httpTransport{ - RoundTripper: r, - scheme: scheme, - addr: addr, - } -} - -// NewRequest creates a new http.Request and sets the URL -// scheme and address with the transport's fields. -func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { - req, err := newHTTPRequest(path, data) - if err != nil { - return nil, err - } - req.URL.Scheme = t.scheme - req.URL.Host = t.addr - return req, nil -} diff --git a/vendor/github.com/containers/storage/pkg/plugins/transport/transport.go b/vendor/github.com/containers/storage/pkg/plugins/transport/transport.go deleted file mode 100644 index d7f1e210..00000000 --- a/vendor/github.com/containers/storage/pkg/plugins/transport/transport.go +++ /dev/null @@ -1,36 +0,0 @@ -package transport - -import ( - "io" - "net/http" - "strings" -) - -// VersionMimetype is the Content-Type the engine sends to plugins. -const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" - -// RequestFactory defines an interface that -// transports can implement to create new requests. 
-type RequestFactory interface { - NewRequest(path string, data io.Reader) (*http.Request, error) -} - -// Transport defines an interface that plugin transports -// must implement. -type Transport interface { - http.RoundTripper - RequestFactory -} - -// newHTTPRequest creates a new request with a path and a body. -func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - req, err := http.NewRequest("POST", path, data) - if err != nil { - return nil, err - } - req.Header.Add("Accept", VersionMimetype) - return req, nil -} diff --git a/vendor/github.com/containers/storage/pkg/random/random.go b/vendor/github.com/containers/storage/pkg/random/random.go deleted file mode 100644 index 70de4d13..00000000 --- a/vendor/github.com/containers/storage/pkg/random/random.go +++ /dev/null @@ -1,71 +0,0 @@ -package random - -import ( - cryptorand "crypto/rand" - "io" - "math" - "math/big" - "math/rand" - "sync" - "time" -) - -// Rand is a global *rand.Rand instance, which initialized with NewSource() source. -var Rand = rand.New(NewSource()) - -// Reader is a global, shared instance of a pseudorandom bytes generator. -// It doesn't consume entropy. -var Reader io.Reader = &reader{rnd: Rand} - -// copypaste from standard math/rand -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// NewSource returns math/rand.Source safe for concurrent use and initialized -// with current unix-nano timestamp -func NewSource() rand.Source { - var seed int64 - if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { - // This should not happen, but worst-case fallback to time-based seed. - seed = time.Now().UnixNano() - } else { - seed = cryptoseed.Int64() - } - return &lockedSource{ - src: rand.NewSource(seed), - } -} - -type reader struct { - rnd *rand.Rand -} - -func (r *reader) Read(b []byte) (int, error) { - i := 0 - for { - val := r.rnd.Int63() - for val > 0 { - b[i] = byte(val) - i++ - if i == len(b) { - return i, nil - } - val >>= 8 - } - } -} diff --git a/vendor/github.com/containers/storage/pkg/reexec/README.md b/vendor/github.com/containers/storage/pkg/reexec/README.md new file mode 100644 index 00000000..6658f69b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/README.md @@ -0,0 +1,5 @@ +# reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go index 3c3a73a9..05319eac 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -5,6 +5,8 @@ package reexec import ( "os/exec" "syscall" + + "golang.org/x/sys/unix" ) // Self returns the path to the current process's binary. @@ -13,7 +15,7 @@ func Self() string { return "/proc/self/exe" } -// Command returns *exec.Cmd which have Path as current binary. Also it setting +// Command returns *exec.Cmd which has Path as current binary. 
It also sets
 // SysProcAttr.Pdeathsig to SIGTERM.
 // This will use the in-memory version (/proc/self/exe) of the current binary,
 // it is thus safe to delete or replace the on-disk binary (os.Args[0]).
@@ -22,7 +24,7 @@ func Command(args ...string) *exec.Cmd {
 		Path: Self(),
 		Args: args,
 		SysProcAttr: &syscall.SysProcAttr{
-			Pdeathsig: syscall.SIGTERM,
+			Pdeathsig: unix.SIGTERM,
 		},
 	}
 }
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
index b70edcb3..778a720e 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
@@ -1,4 +1,4 @@
-// +build freebsd solaris
+// +build freebsd solaris darwin
 
 package reexec
 
@@ -12,7 +12,7 @@ func Self() string {
 	return naiveSelf()
 }
 
-// Command returns *exec.Cmd which have Path as current binary.
+// Command returns *exec.Cmd which has Path as current binary.
 // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
 // be set to "/usr/bin/docker".
 func Command(args ...string) *exec.Cmd {
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
index 9aed004e..76edd824 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows,!freebsd,!solaris
+// +build !linux,!windows,!freebsd,!solaris,!darwin
 
 package reexec
 
@@ -6,7 +6,7 @@ import (
 	"os/exec"
 )
 
-// Command is unsupported on operating systems apart from Linux and Windows.
+// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
 func Command(args ...string) *exec.Cmd {
 	return nil
 }
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
index 8d65e0ae..ca871c42 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
@@ -12,7 +12,7 @@ func Self() string {
 	return naiveSelf()
 }
 
-// Command returns *exec.Cmd which have Path as current binary.
+// Command returns *exec.Cmd which has Path as current binary.
 // For example if current binary is "docker.exe" at "C:\", then cmd.Path will
 // be set to "C:\docker.exe".
func Command(args ...string) *exec.Cmd { diff --git a/vendor/github.com/containers/storage/pkg/stringid/README.md b/vendor/github.com/containers/storage/pkg/stringid/README.md new file mode 100644 index 00000000..37a5098f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringid/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with string identifiers diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go index 74dfaaaa..a0c7c42a 100644 --- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go +++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go @@ -2,19 +2,25 @@ package stringid import ( - "crypto/rand" + cryptorand "crypto/rand" "encoding/hex" + "fmt" "io" + "math" + "math/big" + "math/rand" "regexp" "strconv" "strings" - - "github.com/containers/storage/pkg/random" + "time" ) const shortLen = 12 -var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") +var ( + validShortID = regexp.MustCompile("^[a-f0-9]{12}$") + validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) +) // IsShortID determines if an arbitrary string *looks like* a short ID. func IsShortID(id string) bool { @@ -29,19 +35,14 @@ func TruncateID(id string) string { if i := strings.IndexRune(id, ':'); i >= 0 { id = id[i+1:] } - trimTo := shortLen - if len(id) < shortLen { - trimTo = len(id) + if len(id) > shortLen { + id = id[:shortLen] } - return id[:trimTo] + return id } -func generateID(crypto bool) string { +func generateID(r io.Reader) string { b := make([]byte, 32) - r := random.Reader - if crypto { - r = rand.Reader - } for { if _, err := io.ReadFull(r, b); err != nil { panic(err) // This shouldn't happen @@ -59,13 +60,40 @@ func generateID(crypto bool) string { // GenerateRandomID returns a unique id. func GenerateRandomID() string { - return generateID(true) - + return generateID(cryptorand.Reader) } // GenerateNonCryptoID generates unique id without using cryptographically // secure sources of random. // It helps you to save entropy. func GenerateNonCryptoID() string { - return generateID(false) + return generateID(readerFunc(rand.Read)) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} + +func init() { + // safely set the seed globally so we generate random ids. Tries to use a + // crypto seed before falling back to time. + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. 
+ seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + + rand.Seed(seed) +} + +type readerFunc func(p []byte) (int, error) + +func (fn readerFunc) Read(p []byte) (int, error) { + return fn(p) } diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes.go b/vendor/github.com/containers/storage/pkg/system/chtimes.go index 7637f12e..056d1995 100644 --- a/vendor/github.com/containers/storage/pkg/system/chtimes.go +++ b/vendor/github.com/containers/storage/pkg/system/chtimes.go @@ -2,26 +2,9 @@ package system import ( "os" - "syscall" "time" - "unsafe" ) -var ( - maxTime time.Time -) - -func init() { - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} - // Chtimes changes the access time and modified time of a file at the given path func Chtimes(name string, atime time.Time, mtime time.Time) error { unixMinTime := time.Unix(0, 0) diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go index 29458684..45428c14 100644 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go @@ -3,25 +3,26 @@ package system import ( - "syscall" "time" + + "golang.org/x/sys/windows" ) //setCTime will set the create time on a file. On Windows, this requires //calling SetFileTime and explicitly including the create time. func setCTime(path string, ctime time.Time) error { - ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) - pathp, e := syscall.UTF16PtrFromString(path) + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) if e != nil { return e } - h, e := syscall.CreateFile(pathp, - syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, - syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) if e != nil { return e } - defer syscall.Close(h) - c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) - return syscall.SetFileTime(h, &c, nil, nil) + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) } diff --git a/vendor/github.com/containers/storage/pkg/system/events_windows.go b/vendor/github.com/containers/storage/pkg/system/events_windows.go deleted file mode 100644 index 04e2de78..00000000 --- a/vendor/github.com/containers/storage/pkg/system/events_windows.go +++ /dev/null @@ -1,83 +0,0 @@ -package system - -// This file implements syscalls for Win32 events which are not implemented -// in golang. - -import ( - "syscall" - "unsafe" -) - -var ( - procCreateEvent = modkernel32.NewProc("CreateEventW") - procOpenEvent = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") -) - -// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. 
-func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
-	namep, _ := syscall.UTF16PtrFromString(name)
-	var _p1 uint32
-	if manualReset {
-		_p1 = 1
-	}
-	var _p2 uint32
-	if initialState {
-		_p2 = 1
-	}
-	r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
-	use(unsafe.Pointer(namep))
-	handle = syscall.Handle(r0)
-	if handle == syscall.InvalidHandle {
-		err = e1
-	}
-	return
-}
-
-// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
-func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
-	namep, _ := syscall.UTF16PtrFromString(name)
-	var _p1 uint32
-	if inheritHandle {
-		_p1 = 1
-	}
-	r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
-	use(unsafe.Pointer(namep))
-	handle = syscall.Handle(r0)
-	if handle == syscall.InvalidHandle {
-		err = e1
-	}
-	return
-}
-
-// SetEvent implements win32 SetEvent func in golang.
-func SetEvent(handle syscall.Handle) (err error) {
-	return setResetPulse(handle, procSetEvent)
-}
-
-// ResetEvent implements win32 ResetEvent func in golang.
-func ResetEvent(handle syscall.Handle) (err error) {
-	return setResetPulse(handle, procResetEvent)
-}
-
-// PulseEvent implements win32 PulseEvent func in golang.
-func PulseEvent(handle syscall.Handle) (err error) {
-	return setResetPulse(handle, procPulseEvent)
-}
-
-func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) {
-	r0, _, _ := proc.Call(uintptr(handle))
-	if r0 != 0 {
-		err = syscall.Errno(r0)
-	}
-	return
-}
-
-var temp unsafe.Pointer
-
-// use ensures a variable is kept alive without the GC freeing while still needed
-func use(p unsafe.Pointer) {
-	temp = p
-}
diff --git a/vendor/github.com/containers/storage/pkg/system/exitcode.go b/vendor/github.com/containers/storage/pkg/system/exitcode.go
new file mode 100644
index 00000000..60f0514b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/exitcode.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+	"fmt"
+	"os/exec"
+	"syscall"
+)
+
+// GetExitCode returns the ExitStatus of the specified error if its type is
+// exec.ExitError; it returns 0 and an error otherwise.
+func GetExitCode(err error) (int, error) {
+	exitCode := 0
+	if exiterr, ok := err.(*exec.ExitError); ok {
+		if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+			return procExit.ExitStatus(), nil
+		}
+	}
+	return exitCode, fmt.Errorf("failed to get exit code")
+}
+
+// ProcessExitCode processes the specified error and returns the exit status code
+// if the error is an exec.ExitError; otherwise it returns 0, or 127 if the code cannot be read.
+func ProcessExitCode(err error) (exitCode int) {
+	if err != nil {
+		var exiterr error
+		if exitCode, exiterr = GetExitCode(err); exiterr != nil {
+			// TODO: Fix this so we check the error's text.
+			// we've failed to retrieve exit code, so we set it to 127
+			exitCode = 127
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/filesys.go b/vendor/github.com/containers/storage/pkg/system/filesys.go
index c14feb84..102565f7 100644
--- a/vendor/github.com/containers/storage/pkg/system/filesys.go
+++ b/vendor/github.com/containers/storage/pkg/system/filesys.go
@@ -3,13 +3,21 @@
 package system
 
 import (
+	"io/ioutil"
 	"os"
 	"path/filepath"
 )
 
+// MkdirAllWithACL is a wrapper for MkdirAll on unix systems.
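+// The sddl parameter is ignored on unix; it exists so that callers can share
+// a single signature with the Windows implementation, which does use it.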
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) +} + // MkdirAll creates a directory named path along with any necessary parents, // with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode) error { +func MkdirAll(path string, perm os.FileMode, sddl string) error { return os.MkdirAll(path, perm) } @@ -17,3 +23,45 @@ func MkdirAll(path string, perm os.FileMode) error { func IsAbs(path string) bool { return filepath.IsAbs(path) } + +// The functions below here are wrappers for the equivalents in the os and ioutils packages. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return ioutil.TempFile(dir, prefix) +} diff --git a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go index 16823d55..a61b53d0 100644 --- a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go @@ -6,17 +6,44 @@ import ( "os" "path/filepath" "regexp" + "strconv" "strings" + "sync" "syscall" + "time" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "golang.org/x/sys/windows" ) +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System + SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// with an appropriate SDDL defined ACL. 
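Note how the Unix filesys.go above keeps the Windows-only sddl parameter in MkdirAllWithACL and simply ignores it, so cross-platform call sites need no build tags of their own. A file-level sketch of that passthrough shape (the fsutil package name and file name are hypothetical):

```go
// fsutil_unix.go: hypothetical illustration of the passthrough pattern.

// +build !windows

package fsutil

import "os"

// MkdirAllWithACL accepts an SDDL string so that callers compile unchanged
// on every platform; on Unix the ACL argument is deliberately unused.
func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
	_ = sddl // only meaningful on Windows
	return os.MkdirAll(path, perm)
}
```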
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+	return mkdirall(path, true, sddl)
+}
+
 // MkdirAll implementation that is volume path aware for Windows.
-func MkdirAll(path string, perm os.FileMode) error {
+func MkdirAll(path string, _ os.FileMode, sddl string) error {
+	return mkdirall(path, false, sddl)
+}
+
+// mkdirall is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and can create a directory with
+// a DACL.
+func mkdirall(path string, applyACL bool, sddl string) error {
 	if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
 		return nil
 	}
 
-	// The rest of this method is copied from os.MkdirAll and should be kept
+	// The rest of this method is largely copied from os.MkdirAll and should be kept
 	// as-is to ensure compatibility.
 
 	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
@@ -45,14 +72,19 @@ func MkdirAll(path string, perm os.FileMode) error {
 
 	if j > 1 {
 		// Create parent
-		err = MkdirAll(path[0:j-1], perm)
+		err = mkdirall(path[0:j-1], false, sddl)
 		if err != nil {
 			return err
 		}
 	}
 
-	// Parent now exists; invoke Mkdir and use its result.
-	err = os.Mkdir(path, perm)
+	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+	if applyACL {
+		err = mkdirWithACL(path, sddl)
+	} else {
+		err = os.Mkdir(path, 0)
+	}
+
 	if err != nil {
 		// Handle arguments like "foo/." by
 		// double-checking that directory doesn't exist.
@@ -65,6 +97,35 @@ func MkdirAll(path string, perm os.FileMode) error {
 	return nil
 }
 
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and windows.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string, sddl string) error {
+	sa := windows.SecurityAttributes{Length: 0}
+	sd, err := winio.SddlToSecurityDescriptor(sddl)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+	sa.Length = uint32(unsafe.Sizeof(sa))
+	sa.InheritHandle = 1
+	sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
+
+	namep, err := windows.UTF16PtrFromString(name)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+
+	e := windows.CreateDirectory(namep, &sa)
+	if e != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: e}
+	}
+	return nil
+}
+
 // IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
 // golang filepath.IsAbs does not consider a path \windows\system32 as absolute
 // as it doesn't start with a drive-letter/colon combination. However, in
@@ -80,3 +141,158 @@ func IsAbs(path string) bool {
 	}
 	return true
 }
+
+// The origin of the functions below here is the golang OS and windows packages,
+// slightly modified to only cope with files, not directories, due to the
+// specific use case.
+//
+// The alteration is to allow a file on Windows to be opened with
+// FILE_FLAG_SEQUENTIAL_SCAN (particularly for docker load), to avoid eating
+// the standby list, particularly when accessing large files such as layer.tar.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := windowsOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. 
+ //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for TempFileSequential +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go new file mode 100644 index 00000000..17935088 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go new file mode 100644 index 00000000..019c6644 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init_windows.go @@ -0,0 +1,17 @@ +package system + +import "os" + +// LCOWSupported determines if Linux Containers on Windows are supported. +// Note: This feature is in development (06/17) and enabled through an +// environment variable. At a future time, it will be enabled based +// on build number. @jhowardmsft +var lcowSupported = false + +func init() { + // LCOW initialization + if os.Getenv("LCOW_SUPPORTED") != "" { + lcowSupported = true + } + +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go new file mode 100644 index 00000000..cff33bb4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. 
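The init_windows.go file above gates the in-development LCOW feature behind an environment variable rather than a build flag. A runnable sketch of the same opt-in pattern (the variable name is kept from the patch; the main function is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

// lcowSupported mirrors the init_windows.go gate above: the experimental
// feature is opt-in through an environment variable until it can be tied
// to a build number.
var lcowSupported = false

func init() {
	// Any non-empty value enables the feature.
	if os.Getenv("LCOW_SUPPORTED") != "" {
		lcowSupported = true
	}
}

func main() {
	// LCOW_SUPPORTED=1 go run main.go  ->  true
	fmt.Println("LCOW enabled:", lcowSupported)
}
```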
+func LCOWSupported() bool { + return false +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go new file mode 100644 index 00000000..e54d01e6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return lcowSupported +} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go similarity index 100% rename from vendor/github.com/containers/storage/pkg/system/lstat.go rename to vendor/github.com/containers/storage/pkg/system/lstat_unix.go diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go index 49e87eb4..e51df0da 100644 --- a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go @@ -1,25 +1,14 @@ -// +build windows - package system -import ( - "os" -) +import "os" // Lstat calls os.Lstat to get a fileinfo interface back. // This is then copied into our own locally defined structure. -// Note the Linux version uses fromStatT to do the copy back, -// but that not strictly necessary when already in an OS specific module. func Lstat(path string) (*StatT, error) { fi, err := os.Lstat(path) if err != nil { return nil, err } - return &StatT{ - name: fi.Name(), - size: fi.Size(), - mode: fi.Mode(), - modTime: fi.ModTime(), - isDir: fi.IsDir()}, nil + return fromStatT(&fi) } diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go index 313c601b..925776e7 100644 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go @@ -7,6 +7,7 @@ import ( "unsafe" ) +// #cgo CFLAGS: -std=c99 // #cgo LDFLAGS: -lkstat // #include // #include @@ -89,7 +90,7 @@ func ReadMemInfo() (*MemInfo, error) { if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 { - return nil, fmt.Errorf("Error getting system memory info %v\n", err) + return nil, fmt.Errorf("error getting system memory info %v\n", err) } meminfo := &MemInfo{} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go index d4664259..883944a4 100644 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go @@ -1,12 +1,13 @@ package system import ( - "syscall" "unsafe" + + "golang.org/x/sys/windows" ) var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") ) diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go index 73958182..af79a653 100644 --- a/vendor/github.com/containers/storage/pkg/system/mknod.go +++ b/vendor/github.com/containers/storage/pkg/system/mknod.go @@ -3,13 +3,13 @@ package system import ( - "syscall" + "golang.org/x/sys/unix" ) // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and 
dev. func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) + return unix.Mknod(path, mode, dev) } // Mkdev is used to build the value of linux devices (in /dev/) which specifies major diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go new file mode 100644 index 00000000..f634a6be --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path.go @@ -0,0 +1,21 @@ +package system + +import "runtime" + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +func DefaultPathEnv(platform string) string { + if runtime.GOOS == "windows" { + if platform != runtime.GOOS && LCOWSupported() { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv + +} diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go index c607c4db..f3762e69 100644 --- a/vendor/github.com/containers/storage/pkg/system/path_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go @@ -2,11 +2,6 @@ package system -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - // CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, // is the system drive. This is a no-op on Linux. func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go index cbfe2c15..aab89152 100644 --- a/vendor/github.com/containers/storage/pkg/system/path_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go @@ -8,15 +8,11 @@ import ( "strings" ) -// DefaultPathEnv is deliberately empty on Windows as the default path will be set by -// the container. Docker has no context of what the default path should be. -const DefaultPathEnv = "" - // CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. // This is used, for example, when validating a user provided path in docker cp. // If a drive letter is supplied, it must be the system drive. The drive letter // is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be contatenated with +// need the path in this syntax so that it can ultimately be concatenated with // a Windows long-path which doesn't support drive-letters. 
Examples: // C: --> Fail // C:\ --> \ diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go new file mode 100644 index 00000000..26c8b42c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go @@ -0,0 +1,24 @@ +// +build linux freebsd solaris darwin + +package system + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + unix.Kill(pid, unix.SIGKILL) +} diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go new file mode 100644 index 00000000..fc03c3e6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/rm.go @@ -0,0 +1,80 @@ +package system + +import ( + "os" + "syscall" + "time" + + "github.com/containers/storage/pkg/mount" + "github.com/pkg/errors" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. +// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. +// These types of errors do not need to be returned since it's ok for the dir to +// be gone we can just retry the remove operation. +// +// This should not return a `os.ErrNotExist` kind of error under any circumstances +func EnsureRemoveAll(dir string) error { + notExistErr := make(map[string]bool) + + // track retries + exitOnErr := make(map[string]int) + maxRetry := 5 + + // Attempt to unmount anything beneath this dir first + mount.RecursiveUnmount(dir) + + for { + err := os.RemoveAll(dir) + if err == nil { + return err + } + + pe, ok := err.(*os.PathError) + if !ok { + return err + } + + if os.IsNotExist(err) { + if notExistErr[pe.Path] { + return err + } + notExistErr[pe.Path] = true + + // There is a race where some subdir can be removed but after the parent + // dir entries have been read. + // So the path could be from `os.Remove(subdir)` + // If the reported non-existent path is not the passed in `dir` we + // should just retry, but otherwise return with no error. 
+ if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if mounted, _ := mount.Mounted(pe.Path); mounted { + if e := mount.Unmount(pe.Path); e != nil { + if mounted, _ := mount.Mounted(pe.Path); mounted { + return errors.Wrapf(e, "error while removing %s", dir) + } + } + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unsupported.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go similarity index 59% rename from vendor/github.com/containers/storage/pkg/system/stat_unsupported.go rename to vendor/github.com/containers/storage/pkg/system/stat_darwin.go index f53e9de4..715f05b9 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go @@ -1,12 +1,8 @@ -// +build !linux,!windows,!freebsd,!solaris,!openbsd - package system -import ( - "syscall" -) +import "syscall" -// fromStatT creates a system.StatT type from a syscall.Stat_t type +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go index d0fb6f15..715f05b9 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -1,8 +1,6 @@ package system -import ( - "syscall" -) +import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { @@ -13,15 +11,3 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { rdev: uint64(s.Rdev), mtim: s.Mtimespec}, nil } - -// Stat takes a path to a file and returns -// a system.Stat_t type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go index 8b1eded1..1939f951 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -1,8 +1,6 @@ package system -import ( - "syscall" -) +import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { @@ -14,20 +12,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { mtim: s.Mtim}, nil } -// FromStatT exists only on linux, and loads a system.StatT from a -// syscal.Stat_t. +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. func FromStatT(s *syscall.Stat_t) (*StatT, error) { return fromStatT(s) } - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go index 3c3b71fb..b607dea9 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go @@ -1,10 +1,8 @@ package system -import ( - "syscall" -) +import "syscall" -// fromStatT creates a system.StatT type from a syscall.Stat_t type +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go index 0216985a..b607dea9 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go @@ -1,12 +1,8 @@ -// +build solaris - package system -import ( - "syscall" -) +import "syscall" -// fromStatT creates a system.StatT type from a syscall.Stat_t type +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), @@ -15,20 +11,3 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { rdev: uint64(s.Rdev), mtim: s.Mtim}, nil } - -// FromStatT loads a system.StatT from a syscal.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go similarity index 74% rename from vendor/github.com/containers/storage/pkg/system/stat.go rename to vendor/github.com/containers/storage/pkg/system/stat_unix.go index 087034c5..91c7d121 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go @@ -47,7 +47,14 @@ func (s StatT) Mtim() syscall.Timespec { return s.mtim } -// GetLastModification returns file's last modification time. -func (s StatT) GetLastModification() syscall.Timespec { - return s.Mtim() +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) } diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go index 39490c62..6c639726 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -1,5 +1,3 @@ -// +build windows - package system import ( @@ -8,18 +6,11 @@ import ( ) // StatT type contains status of a file. It contains metadata -// like name, permission, size, etc about a file. 
+// like permission, size, etc about a file. type StatT struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -// Name returns file's name. -func (s StatT) Name() string { - return s.name + mode os.FileMode + size int64 + mtim time.Time } // Size returns file's size. @@ -29,15 +20,30 @@ func (s StatT) Size() int64 { // Mode returns file's permission mode. func (s StatT) Mode() os.FileMode { - return s.mode + return os.FileMode(s.mode) } -// ModTime returns file's last modification time. -func (s StatT) ModTime() time.Time { - return s.modTime +// Mtim returns file's last modification time. +func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) } -// IsDir returns whether file is actually a directory. -func (s StatT) IsDir() bool { - return s.isDir +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) +} + +// fromStatT converts a os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime()}, nil } diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go index 3ae91284..49dbdd37 100644 --- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go @@ -2,12 +2,12 @@ package system -import "syscall" +import "golang.org/x/sys/unix" // Unmount is a platform-specific helper function to call // the unmount syscall. func Unmount(dest string) error { - return syscall.Unmount(dest, 0) + return unix.Unmount(dest, 0) } // CommandLineToArgv should not be used on Unix. diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go index 7aaab7e7..23e9b207 100644 --- a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go @@ -1,15 +1,16 @@ package system import ( - "syscall" "unsafe" "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" ) var ( - ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") ) // OSVersion is a wrapper for Windows version information @@ -41,7 +42,7 @@ type osVersionInfoEx struct { func GetOSVersion() OSVersion { var err error osv := OSVersion{} - osv.Version, err = syscall.GetVersion() + osv.Version, err = windows.GetVersion() if err != nil { // GetVersion never fails. panic(err) @@ -53,6 +54,8 @@ func GetOSVersion() OSVersion { } // IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. 
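The rewritten stat_windows.go above trims StatT to the three fields the storage code actually consults and fills it from a plain os.FileInfo. The same shape is easy to mirror on any platform; a small sketch (the field set is taken from the patch, the rest is illustrative):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// statT mirrors the trimmed StatT above: mode, size and mtime only.
type statT struct {
	mode os.FileMode
	size int64
	mtim time.Time
}

// fromFileInfo plays the role of fromStatT for a portable os.FileInfo value.
func fromFileInfo(fi os.FileInfo) *statT {
	return &statT{mode: fi.Mode(), size: fi.Size(), mtim: fi.ModTime()}
}

func main() {
	fi, err := os.Stat(os.Args[0]) // stat the running binary; it always exists
	if err != nil {
		fmt.Println(err)
		return
	}
	s := fromFileInfo(fi)
	fmt.Println(s.size, s.mode, s.mtim.UTC())
}
```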
func IsWindowsClient() bool { osviex := &osVersionInfoEx{OSVersionInfoSize: 284} r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) @@ -64,6 +67,22 @@ func IsWindowsClient() bool { return osviex.ProductType == verNTWorkstation } +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + // Unmount is a platform-specific helper function to call // the unmount syscall. Not supported on Windows func Unmount(dest string) error { @@ -74,20 +93,20 @@ func Unmount(dest string) error { func CommandLineToArgv(commandLine string) ([]string, error) { var argc int32 - argsPtr, err := syscall.UTF16PtrFromString(commandLine) + argsPtr, err := windows.UTF16PtrFromString(commandLine) if err != nil { return nil, err } - argv, err := syscall.CommandLineToArgv(argsPtr, &argc) + argv, err := windows.CommandLineToArgv(argsPtr, &argc) if err != nil { return nil, err } - defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) newArgs := make([]string, argc) for i, v := range (*argv)[:argc] { - newArgs[i] = string(syscall.UTF16ToString((*v)[:])) + newArgs[i] = string(windows.UTF16ToString((*v)[:])) } return newArgs, nil diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go index 3d0146b0..5a10eda5 100644 --- a/vendor/github.com/containers/storage/pkg/system/umask.go +++ b/vendor/github.com/containers/storage/pkg/system/umask.go @@ -3,11 +3,11 @@ package system import ( - "syscall" + "golang.org/x/sys/unix" ) // Umask sets current process's file mode creation mask to newmask // and returns oldmask. func Umask(newmask int) (oldmask int, err error) { - return syscall.Umask(newmask), nil + return unix.Umask(newmask), nil } diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_darwin.go b/vendor/github.com/containers/storage/pkg/system/utimes_darwin.go deleted file mode 100644 index 0a161975..00000000 --- a/vendor/github.com/containers/storage/pkg/system/utimes_darwin.go +++ /dev/null @@ -1,8 +0,0 @@ -package system - -import "syscall" - -// LUtimesNano is not supported by darwin platform. -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go index e2eac3b5..6a775243 100644 --- a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go @@ -3,18 +3,20 @@ package system import ( "syscall" "unsafe" + + "golang.org/x/sys/unix" ) // LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. 
+// It's used for symlink files because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
 func LUtimesNano(path string, ts []syscall.Timespec) error {
 	var _path *byte
-	_path, err := syscall.BytePtrFromString(path)
+	_path, err := unix.BytePtrFromString(path)
 	if err != nil {
 		return err
 	}
-	if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
+	if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
 		return err
 	}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
index fc8a1aba..edc588a6 100644
--- a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
@@ -3,22 +3,21 @@ package system
 import (
 	"syscall"
 	"unsafe"
+
+	"golang.org/x/sys/unix"
 )
 
 // LUtimesNano is used to change access and modification time of the specified path.
-// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
+// It's used for symlink files because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
 func LUtimesNano(path string, ts []syscall.Timespec) error {
-	// These are not currently available in syscall
-	atFdCwd := -100
-	atSymLinkNoFollow := 0x100
+	atFdCwd := unix.AT_FDCWD
 	var _path *byte
-	_path, err := syscall.BytePtrFromString(path)
+	_path, err := unix.BytePtrFromString(path)
 	if err != nil {
 		return err
 	}
-
-	if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS {
+	if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
 		return err
 	}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
index 50c3a043..13971454 100644
--- a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
@@ -1,10 +1,10 @@
-// +build !linux,!freebsd,!darwin
+// +build !linux,!freebsd
 
 package system
 
 import "syscall"
 
-// LUtimesNano is not supported on platforms other than linux, freebsd and darwin.
+// LUtimesNano is only supported on linux and freebsd.
 func LUtimesNano(path string, ts []syscall.Timespec) error {
 	return ErrNotSupportedPlatform
 }
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
index d2e2c057..98b111be 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
@@ -1,63 +1,29 @@
 package system
 
-import (
-	"syscall"
-	"unsafe"
-)
+import "golang.org/x/sys/unix"
 
 // Lgetxattr retrieves the value of the extended attribute identified by attr
 // and associated with the given path in the file system.
 // It returns a nil slice and nil error if the xattr is not set.
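The utimes_linux.go hunk above swaps the hand-rolled AT_FDCWD and AT_SYMLINK_NOFOLLOW constants for the ones exported by x/sys/unix while still issuing the raw utimensat syscall. For comparison, x/sys/unix also wraps the call directly; a Linux-only sketch using that wrapper (the /tmp path is a stand-in):

```go
// +build linux

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// lutimesNano mimics LUtimesNano above, but through the unix.UtimesNanoAt
// wrapper instead of a raw Syscall6.
func lutimesNano(path string, atime, mtime time.Time) error {
	ts := []unix.Timespec{
		unix.NsecToTimespec(atime.UnixNano()),
		unix.NsecToTimespec(mtime.UnixNano()),
	}
	// AT_SYMLINK_NOFOLLOW updates the link itself rather than its target.
	return unix.UtimesNanoAt(unix.AT_FDCWD, path, ts, unix.AT_SYMLINK_NOFOLLOW)
}

func main() {
	now := time.Now()
	fmt.Println(lutimesNano("/tmp/some-symlink", now, now)) // path is illustrative
}
```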
func Lgetxattr(path string, attr string) ([]byte, error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - dest := make([]byte, 128) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno == syscall.ENODATA { + sz, errno := unix.Lgetxattr(path, attr, dest) + if errno == unix.ENODATA { return nil, nil } - if errno == syscall.ERANGE { + if errno == unix.ERANGE { dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + sz, errno = unix.Lgetxattr(path, attr, dest) } - if errno != 0 { + if errno != nil { return nil, errno } return dest[:sz], nil } -var _zero uintptr - // Lsetxattr sets the value of the extended attribute identified by attr // and associated with the given path in the file system. func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil + return unix.Lsetxattr(path, attr, data, flags) } diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 4b696203..de605432 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -20,41 +20,11 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" - "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) var ( - // ErrLoadError indicates that there was an initialization error. - ErrLoadError = errors.New("error loading storage metadata") - // ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used. - ErrDuplicateID = errors.New("that ID is already in use") - // ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used. - ErrDuplicateName = errors.New("that name is already in use") - // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer. - ErrParentIsContainer = errors.New("would-be parent layer is a container") - // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container. - ErrNotAContainer = errors.New("identifier is not a container") - // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image. - ErrNotAnImage = errors.New("identifier is not an image") - // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer. 
- ErrNotALayer = errors.New("identifier is not a layer") - // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist. - ErrNotAnID = errors.New("identifier is not a layer, image, or container") - // ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children. - ErrLayerHasChildren = errors.New("layer has children") - // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer. - ErrLayerUsedByImage = errors.New("layer is in use by an image") - // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer. - ErrLayerUsedByContainer = errors.New("layer is in use by a container") - // ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image. - ErrImageUsedByContainer = errors.New("image is in use by a container") - // ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information. - ErrIncompleteOptions = errors.New("missing necessary StoreOptions") - // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer. - ErrSizeUnknown = errors.New("size is not known") - // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents. - ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") // DefaultStoreOptions is a reasonable default set of options. DefaultStoreOptions StoreOptions stores []*store @@ -117,6 +87,10 @@ type ROBigDataStore interface { // data associated with this ID, if it has previously been set. BigDataSize(id, key string) (int64, error) + // BigDataDigest retrieves the digest of a (potentially large) piece of + // data associated with this ID, if it has previously been set. + BigDataDigest(id, key string) (digest.Digest, error) + // BigDataNames() returns a list of the names of previously-stored pieces of // data. BigDataNames(id string) ([]string, error) @@ -183,31 +157,6 @@ type Store interface { // by the Store. GraphDriver() (drivers.Driver, error) - // LayerStore obtains and returns a handle to the writeable layer store - // object used by the Store. Accessing this store directly will bypass - // locking and synchronization, so use it with care. - LayerStore() (LayerStore, error) - - // ROLayerStore obtains additional read/only layer store objects used - // by the Store. Accessing these stores directly will bypass locking - // and synchronization, so use them with care. - ROLayerStores() ([]ROLayerStore, error) - - // ImageStore obtains and returns a handle to the writable image store - // object used by the Store. Accessing this store directly will bypass - // locking and synchronization, so use it with care. - ImageStore() (ImageStore, error) - - // ROImageStores obtains additional read/only image store objects used - // by the Store. Accessing these stores directly will bypass locking - // and synchronization, so use them with care. - ROImageStores() ([]ROImageStore, error) - - // ContainerStore obtains and returns a handle to the container store - // object used by the Store. Accessing this store directly will bypass - // locking and synchronization, so use it with care. 
- ContainerStore() (ContainerStore, error) - // CreateLayer creates a new layer in the underlying storage driver, // optionally having the specified ID (one will be assigned if none is // specified), with the specified layer (or no layer) as its parent, @@ -225,7 +174,7 @@ type Store interface { // if reexec.Init { // return // } - PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff archive.Reader) (*Layer, int64, error) + PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff io.Reader) (*Layer, int64, error) // CreateImage creates a new image, optionally with the specified ID // (one will be assigned if none is specified), with optional names, @@ -340,7 +289,7 @@ type Store interface { // if reexec.Init { // return // } - ApplyDiff(to string, diff archive.Reader) (int64, error) + ApplyDiff(to string, diff io.Reader) (int64, error) // LayersByCompressedDigest returns a slice of the layers with the // specified compressed digest value recorded for them. @@ -367,6 +316,7 @@ type Store interface { Names(id string) ([]string, error) // SetNames changes the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. SetNames(id string, names []string) error // ListImageBigData retrieves a list of the (possibly large) chunks of @@ -381,6 +331,10 @@ type Store interface { // of named data associated with an image. ImageBigDataSize(id, key string) (int64, error) + // ImageBigDataDigest retrieves the digest of a (possibly large) chunk + // of named data associated with an image. + ImageBigDataDigest(id, key string) (digest.Digest, error) + // SetImageBigData stores a (possibly large) chunk of named data associated // with an image. SetImageBigData(id, key string, data []byte) error @@ -397,6 +351,10 @@ type Store interface { // chunk of named data associated with a container. ContainerBigDataSize(id, key string) (int64, error) + // ContainerBigDataDigest retrieves the digest of a (possibly large) + // chunk of named data associated with a container. + ContainerBigDataDigest(id, key string) (digest.Digest, error) + // SetContainerBigData stores a (possibly large) chunk of named data // associated with a container. SetContainerBigData(id, key string, data []byte) error @@ -412,6 +370,10 @@ type Store interface { // and may have different metadata, big data items, and flags. ImagesByTopLayer(id string) ([]*Image, error) + // ImagesByDigest returns a list of images which contain a big data item + // named ImageDigestBigDataKey whose contents have the specified digest. + ImagesByDigest(d digest.Digest) ([]*Image, error) + // Container returns a specific container. Container(id string) (*Container, error) @@ -472,6 +434,8 @@ type ImageOptions struct { // CreationDate, if not zero, will override the default behavior of marking the image as having been // created when CreateImage() was called, recording CreationDate instead. CreationDate time.Time + // Digest is a hard-coded digest value that we can use to look up the image. It is optional. + Digest digest.Digest } // ContainerOptions is used for passing options to a Store's CreateContainer() method. 
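The BigDataDigest, ImageBigDataDigest, ContainerBigDataDigest, and ImagesByDigest additions above all traffic in opencontainers/go-digest values, which are algorithm-prefixed strings such as sha256:<hex>. A short sketch of producing, validating, and comparing one (the manifest bytes are made up):

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	data := []byte(`{"schemaVersion": 2}`) // stand-in for an image manifest

	// FromBytes hashes with the canonical algorithm (sha256).
	d := digest.FromBytes(data)
	fmt.Println(d) // sha256:<64 hex chars>

	// Validate is the same check the store applies before trusting a
	// digest it reads back from disk.
	fmt.Println("well-formed:", d.Validate() == nil)

	// Digests are plain comparable strings, so lookups such as
	// ImagesByDigest can use them directly as map keys.
	fmt.Println("stable:", d == digest.FromBytes(data))
}
```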
@@ -529,11 +493,6 @@ func GetStore(options StoreOptions) (Store, error) { if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) { return nil, err } - for _, subdir := range []string{} { - if err := os.MkdirAll(filepath.Join(options.RunRoot, subdir), 0700); err != nil && !os.IsExist(err) { - return nil, err - } - } if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) { return nil, err } @@ -644,7 +603,13 @@ func (s *store) getGraphDriver() (drivers.Driver, error) { if s.graphDriver != nil { return s.graphDriver, nil } - driver, err := drivers.New(s.graphRoot, s.graphDriverName, s.graphOptions, s.uidMap, s.gidMap) + config := drivers.Options{ + Root: s.graphRoot, + DriverOptions: s.graphOptions, + UIDMaps: s.uidMap, + GIDMaps: s.gidMap, + } + driver, err := drivers.New(s.graphDriverName, config) if err != nil { return nil, err } @@ -664,6 +629,9 @@ func (s *store) GraphDriver() (drivers.Driver, error) { return s.getGraphDriver() } +// LayerStore obtains and returns a handle to the writeable layer store object +// used by the Store. Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. func (s *store) LayerStore() (LayerStore, error) { s.graphLock.Lock() defer s.graphLock.Unlock() @@ -696,6 +664,9 @@ func (s *store) LayerStore() (LayerStore, error) { return s.layerStore, nil } +// ROLayerStores obtains additional read/only layer store objects used by the +// Store. Accessing these stores directly will bypass locking and +// synchronization, so it is not part of the exported Store interface. func (s *store) ROLayerStores() ([]ROLayerStore, error) { s.graphLock.Lock() defer s.graphLock.Unlock() @@ -722,6 +693,9 @@ func (s *store) ROLayerStores() ([]ROLayerStore, error) { return s.roLayerStores, nil } +// ImageStore obtains and returns a handle to the writable image store object +// used by the Store. Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. func (s *store) ImageStore() (ImageStore, error) { if s.imageStore != nil { return s.imageStore, nil @@ -729,6 +703,9 @@ func (s *store) ImageStore() (ImageStore, error) { return nil, ErrLoadError } +// ROImageStores obtains additional read/only image store objects used by the +// Store. Accessing these stores directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. func (s *store) ROImageStores() ([]ROImageStore, error) { if len(s.roImageStores) != 0 { return s.roImageStores, nil @@ -749,6 +726,9 @@ func (s *store) ROImageStores() ([]ROImageStore, error) { return s.roImageStores, nil } +// ContainerStore obtains and returns a handle to the container store object +// used by the Store. Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. 
func (s *store) ContainerStore() (ContainerStore, error) { if s.containerStore != nil { return s.containerStore, nil @@ -756,16 +736,19 @@ func (s *store) ContainerStore() (ContainerStore, error) { return nil, ErrLoadError } -func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff archive.Reader) (*Layer, int64, error) { +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff io.Reader) (*Layer, int64, error) { rlstore, err := s.LayerStore() if err != nil { return nil, -1, err } + rlstores, err := s.ROLayerStores() + if err != nil { + return nil, -1, err + } rcstore, err := s.ContainerStore() if err != nil { return nil, -1, err } - rlstore.Lock() defer rlstore.Unlock() if modified, err := rlstore.Modified(); modified || err != nil { @@ -780,9 +763,15 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w id = stringid.GenerateRandomID() } if parent != "" { - if l, err := rlstore.Get(parent); err == nil && l != nil { - parent = l.ID - } else { + var ilayer *Layer + for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) { + if l, err := lstore.Get(parent); err == nil && l != nil { + ilayer = l + parent = ilayer.ID + break + } + } + if ilayer == nil { return nil, -1, ErrLayerUnknown } containers, err := rcstore.Containers() @@ -808,31 +797,32 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o id = stringid.GenerateRandomID() } - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - stores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - stores = append([]ROLayerStore{rlstore}, stores...) - var ilayer *Layer - for _, rlstore := range stores { - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + if layer != "" { + lstore, err := s.LayerStore() + if err != nil { + return nil, err } - ilayer, err = rlstore.Get(layer) - if err == nil { - break + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err } + var ilayer *Layer + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + ilayer, err = store.Get(layer) + if err == nil { + break + } + } + if ilayer == nil { + return nil, ErrLayerUnknown + } + layer = ilayer.ID } - if ilayer == nil { - return nil, ErrLayerUnknown - } - layer = ilayer.ID ristore, err := s.ImageStore() if err != nil { @@ -845,11 +835,11 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o } creationDate := time.Now().UTC() - if options != nil { + if options != nil && !options.CreationDate.IsZero() { creationDate = options.CreationDate } - return ristore.Create(id, names, layer, metadata, creationDate) + return ristore.Create(id, names, layer, metadata, creationDate, options.Digest) } func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { @@ -869,23 +859,22 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat imageTopLayer := "" imageID := "" if image != "" { - ristore, err := s.ImageStore() + istore, err := s.ImageStore() if err != nil { return nil, err } - stores, err := s.ROImageStores() + istores, err := s.ROImageStores() if err != nil { return nil, err } - stores = append([]ROImageStore{ristore}, stores...) 
var cimage *Image - for _, ristore := range stores { - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - cimage, err = ristore.Get(image) + cimage, err = store.Get(image) if err == nil { break } @@ -960,23 +949,22 @@ func (s *store) SetMetadata(id, metadata string) error { } func (s *store) Metadata(id string) (string, error) { - rlstore, err := s.LayerStore() + lstore, err := s.LayerStore() if err != nil { return "", err } - stores, err := s.ROLayerStores() + lstores, err := s.ROLayerStores() if err != nil { return "", err } - stores = append([]ROLayerStore{rlstore}, stores...) - for _, rlstore := range stores { - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - if rlstore.Exists(id) { - return rlstore.Metadata(id) + if store.Exists(id) { + return store.Metadata(id) } } @@ -988,50 +976,48 @@ func (s *store) Metadata(id string) (string, error) { if err != nil { return "", err } - istores = append([]ROImageStore{istore}, istores...) - for _, ristore := range istores { - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - if ristore.Exists(id) { - return ristore.Metadata(id) + if store.Exists(id) { + return store.Metadata(id) } } - rcstore, err := s.ContainerStore() + cstore, err := s.ContainerStore() if err != nil { return "", err } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() + cstore.Lock() + defer cstore.Unlock() + if modified, err := cstore.Modified(); modified || err != nil { + cstore.Load() } - if rcstore.Exists(id) { - return rcstore.Metadata(id) + if cstore.Exists(id) { + return cstore.Metadata(id) } return "", ErrNotAnID } func (s *store) ListImageBigData(id string) ([]string, error) { - ristore, err := s.ImageStore() + istore, err := s.ImageStore() if err != nil { return nil, err } - stores, err := s.ROImageStores() + istores, err := s.ROImageStores() if err != nil { return nil, err } - stores = append([]ROImageStore{ristore}, stores...) - for _, ristore := range stores { - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + for _, store := range append([]ROImageStore{istore}, istores...) 
{ + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - bigDataNames, err := ristore.BigDataNames(id) + bigDataNames, err := store.BigDataNames(id) if err == nil { return bigDataNames, err } @@ -1040,22 +1026,21 @@ func (s *store) ListImageBigData(id string) ([]string, error) { } func (s *store) ImageBigDataSize(id, key string) (int64, error) { - ristore, err := s.ImageStore() + istore, err := s.ImageStore() if err != nil { return -1, err } - stores, err := s.ROImageStores() + istores, err := s.ROImageStores() if err != nil { return -1, err } - stores = append([]ROImageStore{ristore}, stores...) - for _, ristore := range stores { - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - size, err := ristore.BigDataSize(id, key) + size, err := store.BigDataSize(id, key) if err == nil { return size, nil } @@ -1063,14 +1048,14 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) { return -1, ErrSizeUnknown } -func (s *store) ImageBigData(id, key string) ([]byte, error) { +func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { ristore, err := s.ImageStore() if err != nil { - return nil, err + return "", err } stores, err := s.ROImageStores() if err != nil { - return nil, err + return "", err } stores = append([]ROImageStore{ristore}, stores...) for _, ristore := range stores { @@ -1079,12 +1064,34 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) { if modified, err := ristore.Modified(); modified || err != nil { ristore.Load() } - data, err := ristore.BigData(id, key) + d, err := ristore.BigDataDigest(id, key) + if err == nil && d.Validate() == nil { + return d, nil + } + } + return "", ErrDigestUnknown +} + +func (s *store) ImageBigData(id, key string) ([]byte, error) { + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, store := range append([]ROImageStore{istore}, istores...) 
{ + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + data, err := store.BigData(id, key) if err == nil { return data, nil } } - return nil, ErrImageUnknown } @@ -1128,10 +1135,22 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) { if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } - return rcstore.BigDataSize(id, key) } +func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return "", err + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + return rcstore.BigDataDigest(id, key) +} + func (s *store) ContainerBigData(id, key string) ([]byte, error) { rcstore, err := s.ContainerStore() if err != nil { @@ -1142,7 +1161,6 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) { if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } - return rcstore.BigData(id, key) } @@ -1156,7 +1174,6 @@ func (s *store) SetContainerBigData(id, key string, data []byte) error { if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } - return rcstore.SetBigData(id, key, data) } @@ -1169,34 +1186,32 @@ func (s *store) Exists(id string) bool { if err != nil { return false } - lstores = append([]ROLayerStore{lstore}, lstores...) - for _, rlstore := range lstores { - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - if rlstore.Exists(id) { + if store.Exists(id) { return true } } - ristore, err := s.ImageStore() + istore, err := s.ImageStore() if err != nil { return false } - stores, err := s.ROImageStores() + istores, err := s.ROImageStores() if err != nil { return false } - stores = append([]ROImageStore{ristore}, stores...) - for _, ristore := range stores { - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - if ristore.Exists(id) { + if store.Exists(id) { return true } } @@ -1217,15 +1232,20 @@ func (s *store) Exists(id string) bool { return false } -func (s *store) SetNames(id string, names []string) error { - deduped := []string{} +func dedupeNames(names []string) []string { seen := make(map[string]bool) + deduped := make([]string, 0, len(names)) for _, name := range names { if _, wasSeen := seen[name]; !wasSeen { seen[name] = true deduped = append(deduped, name) } } + return deduped +} + +func (s *store) SetNames(id string, names []string) error { + deduped := dedupeNames(names) rlstore, err := s.LayerStore() if err != nil { @@ -1269,42 +1289,40 @@ func (s *store) SetNames(id string, names []string) error { } func (s *store) Names(id string) ([]string, error) { - rlstore, err := s.LayerStore() + lstore, err := s.LayerStore() if err != nil { return nil, err } - stores, err := s.ROLayerStores() + lstores, err := s.ROLayerStores() if err != nil { return nil, err } - stores = append([]ROLayerStore{rlstore}, stores...) 
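Editor's note: the `dedupeNames` helper factored out above drops repeated names while preserving the order in which each name first appears, so `SetNames` callers can pass unsanitized lists. A short self-contained check (the helper body is copied from the hunk above):

```go
package main

import "fmt"

// dedupeNames drops repeated names, keeping first-seen order.
func dedupeNames(names []string) []string {
	seen := make(map[string]bool)
	deduped := make([]string, 0, len(names))
	for _, name := range names {
		if _, wasSeen := seen[name]; !wasSeen {
			seen[name] = true
			deduped = append(deduped, name)
		}
	}
	return deduped
}

func main() {
	fmt.Println(dedupeNames([]string{"app", "app:latest", "app"}))
	// Output: [app app:latest]
}
```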
- for _, rlstore := range stores { - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - if l, err := rlstore.Get(id); l != nil && err == nil { + if l, err := store.Get(id); l != nil && err == nil { return l.Names, nil } } - ristore, err := s.ImageStore() + istore, err := s.ImageStore() if err != nil { return nil, err } - ristores, err := s.ROImageStores() + istores, err := s.ROImageStores() if err != nil { return nil, err } - ristores = append([]ROImageStore{ristore}, ristores...) - for _, ristore := range stores { - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - if i, err := ristore.Get(id); i != nil && err == nil { + if i, err := store.Get(id); i != nil && err == nil { return i.Names, nil } } @@ -1325,44 +1343,57 @@ func (s *store) Names(id string) ([]string, error) { } func (s *store) Lookup(name string) (string, error) { - rcstore, err := s.ContainerStore() + lstore, err := s.LayerStore() if err != nil { return "", err } - ristore, err := s.ImageStore() + lstores, err := s.ROLayerStores() if err != nil { return "", err } - rlstore, err := s.LayerStore() - if err != nil { - return "", err + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + if l, err := store.Get(name); l != nil && err == nil { + return l.ID, nil + } } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + istore, err := s.ImageStore() + if err != nil { + return "", err } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + istores, err := s.ROImageStores() + if err != nil { + return "", err } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() + for _, store := range append([]ROImageStore{istore}, istores...) 
{ + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + if i, err := store.Get(name); i != nil && err == nil { + return i.ID, nil + } } - if l, err := rlstore.Get(name); l != nil && err == nil { - return l.ID, nil + cstore, err := s.ContainerStore() + if err != nil { + return "", err } - if i, err := ristore.Get(name); i != nil && err == nil { - return i.ID, nil + cstore.Lock() + defer cstore.Unlock() + if modified, err := cstore.Modified(); modified || err != nil { + cstore.Load() } - if c, err := rcstore.Get(name); c != nil && err == nil { + if c, err := cstore.Get(name); c != nil && err == nil { return c.ID, nil } + return "", ErrLayerUnknown } @@ -1758,75 +1789,72 @@ func (s *store) Unmount(id string) error { } func (s *store) Changes(from, to string) ([]archive.Change, error) { - rlstore, err := s.LayerStore() + lstore, err := s.LayerStore() if err != nil { return nil, err } - stores, err := s.ROLayerStores() + lstores, err := s.ROLayerStores() if err != nil { return nil, err } - stores = append([]ROLayerStore{rlstore}, stores...) - for _, rlstore := range stores { - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - if rlstore.Exists(to) { - return rlstore.Changes(from, to) + if store.Exists(to) { + return store.Changes(from, to) } } return nil, ErrLayerUnknown } func (s *store) DiffSize(from, to string) (int64, error) { - rlstore, err := s.LayerStore() + lstore, err := s.LayerStore() if err != nil { return -1, err } - stores, err := s.ROLayerStores() + lstores, err := s.ROLayerStores() if err != nil { return -1, err } - stores = append([]ROLayerStore{rlstore}, stores...) - for _, rlstore := range stores { - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - if rlstore.Exists(to) { - return rlstore.DiffSize(from, to) + if store.Exists(to) { + return store.DiffSize(from, to) } } return -1, ErrLayerUnknown } func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { - rlstore, err := s.LayerStore() + lstore, err := s.LayerStore() if err != nil { return nil, err } - stores, err := s.ROLayerStores() + lstores, err := s.ROLayerStores() if err != nil { return nil, err } - stores = append([]ROLayerStore{rlstore}, stores...) - for _, rlstore := range stores { - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + for _, store := range append([]ROLayerStore{lstore}, lstores...) 
{ + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - if rlstore.Exists(to) { - return rlstore.Diff(from, to, options) + if store.Exists(to) { + return store.Diff(from, to, options) } } return nil, ErrLayerUnknown } -func (s *store) ApplyDiff(to string, diff archive.Reader) (int64, error) { +func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { rlstore, err := s.LayerStore() if err != nil { return -1, err @@ -1844,37 +1872,47 @@ func (s *store) ApplyDiff(to string, diff archive.Reader) (int64, error) { func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { var layers []Layer - rlstore, err := s.LayerStore() + lstore, err := s.LayerStore() if err != nil { return nil, err } - stores, err := s.ROLayerStores() + lstores, err := s.ROLayerStores() if err != nil { return nil, err } - stores = append([]ROLayerStore{rlstore}, stores...) - - for _, rlstore := range stores { - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - slayers, err := m(rlstore, d) + storeLayers, err := m(store, d) if err != nil { - return nil, err + if errors.Cause(err) != ErrLayerUnknown { + return nil, err + } + continue } - layers = append(layers, slayers...) + layers = append(layers, storeLayers...) + } + if len(layers) == 0 { + return nil, ErrLayerUnknown } return layers, nil } func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d) + } return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) } func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d) + } return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) } @@ -1887,15 +1925,14 @@ func (s *store) LayerSize(id string) (int64, error) { if err != nil { return -1, err } - lstores = append([]ROLayerStore{lstore}, lstores...) - for _, rlstore := range lstores { - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - if rlstore.Exists(id) { - return rlstore.Size(id) + if store.Exists(id) { + return store.Size(id) } } return -1, ErrLayerUnknown @@ -1903,55 +1940,53 @@ func (s *store) LayerSize(id string) (int64, error) { func (s *store) Layers() ([]Layer, error) { var layers []Layer - rlstore, err := s.LayerStore() + lstore, err := s.LayerStore() if err != nil { return nil, err } - stores, err := s.ROLayerStores() + lstores, err := s.ROLayerStores() if err != nil { return nil, err } - stores = append([]ROLayerStore{rlstore}, stores...) 
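Editor's note: `LayersByCompressedDigest` and `LayersByUncompressedDigest` now reject malformed digests up front instead of letting an invalid string fall through to every per-store lookup. A quick illustration of what `Validate` catches, using the opencontainers/go-digest package the code already vendors:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	// A well-formed sha256 digest: algorithm prefix plus 64 hex characters.
	good := digest.Digest("sha256:" + strings.Repeat("a", 64))
	fmt.Println(good.Validate()) // <nil>

	// Truncated hex: Validate returns an error, so the store can fail fast
	// with a wrapped error instead of scanning every layer store.
	bad := digest.Digest("sha256:abc123")
	fmt.Println(bad.Validate()) // non-nil error
}
```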
- for _, rlstore := range stores { - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - slayers, err := rlstore.Layers() + storeLayers, err := store.Layers() if err != nil { return nil, err } - layers = append(layers, slayers...) + layers = append(layers, storeLayers...) } return layers, nil } func (s *store) Images() ([]Image, error) { var images []Image - ristore, err := s.ImageStore() + istore, err := s.ImageStore() if err != nil { return nil, err } - stores, err := s.ROImageStores() + istores, err := s.ROImageStores() if err != nil { return nil, err } - stores = append([]ROImageStore{ristore}, stores...) - for _, ristore := range stores { - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - simages, err := ristore.Images() + storeImages, err := store.Images() if err != nil { return nil, err } - images = append(images, simages...) + images = append(images, storeImages...) } return images, nil } @@ -1972,24 +2007,21 @@ func (s *store) Containers() ([]Container, error) { } func (s *store) Layer(id string) (*Layer, error) { - rlstore, err := s.LayerStore() + lstore, err := s.LayerStore() if err != nil { return nil, err } - - stores, err := s.ROLayerStores() + lstores, err := s.ROLayerStores() if err != nil { return nil, err } - stores = append([]ROLayerStore{rlstore}, stores...) - - for _, rlstore := range stores { - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - layer, err := rlstore.Get(id) + layer, err := store.Get(id) if err == nil { return layer, nil } @@ -1998,22 +2030,21 @@ func (s *store) Layer(id string) (*Layer, error) { } func (s *store) Image(id string) (*Image, error) { - ristore, err := s.ImageStore() + istore, err := s.ImageStore() if err != nil { return nil, err } - stores, err := s.ROImageStores() + istores, err := s.ROImageStores() if err != nil { return nil, err } - stores = append([]ROImageStore{ristore}, stores...) - for _, ristore := range stores { - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - image, err := ristore.Get(id) + image, err := store.Get(id) if err == nil { return image, nil } @@ -2028,23 +2059,22 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { return nil, err } - ristore, err := s.ImageStore() + istore, err := s.ImageStore() if err != nil { return nil, err } - stores, err := s.ROImageStores() + istores, err := s.ROImageStores() if err != nil { return nil, err } - stores = append([]ROImageStore{ristore}, stores...) 
- for _, ristore := range stores { - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() } - imageList, err := ristore.Images() + imageList, err := store.Images() if err != nil { return nil, err } @@ -2057,6 +2087,33 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { return images, nil } +func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { + images := []*Image{} + + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + imageList, err := store.ByDigest(d) + if err != nil && err != ErrImageUnknown { + return nil, err + } + images = append(images, imageList...) + } + return images, nil +} + func (s *store) Container(id string) (*Container, error) { rcstore, err := s.ContainerStore() if err != nil { @@ -2276,7 +2333,7 @@ func makeBigDataBaseName(key string) string { } func stringSliceWithoutValue(slice []string, value string) []string { - modified := []string{} + modified := make([]string, 0, len(slice)) for _, v := range slice { if v == value { continue @@ -2294,6 +2351,12 @@ type OptionsConfig struct { // Image stores. Usually used to access Networked File System // for shared image content AdditionalImageStores []string `toml:"additionalimagestores"` + + // Size + Size string `toml:"size"` + + // OverrideKernelCheck + OverrideKernelCheck string `toml:"override_kernel_check"` } // TOML-friendly explicit tables used for conversions. 
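Editor's note: the new `Size` and `OverrideKernelCheck` fields ride in through the same TOML tables as `additionalimagestores`. A sketch of how such a file decodes with the vendored BurntSushi/toml package, assuming a `[storage.options]` layout like the one the `init()` hunk below consumes (the wrapper struct names here are illustrative, not the package's actual types):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// optionsConfig mirrors the toml tags added in the hunk above.
type optionsConfig struct {
	AdditionalImageStores []string `toml:"additionalimagestores"`
	Size                  string   `toml:"size"`
	OverrideKernelCheck   string   `toml:"override_kernel_check"`
}

// storageConfig is an illustrative wrapper for the [storage] table.
type storageConfig struct {
	Storage struct {
		Driver  string        `toml:"driver"`
		Options optionsConfig `toml:"options"`
	} `toml:"storage"`
}

func main() {
	const conf = `
[storage]
driver = "overlay"
[storage.options]
size = "10G"
override_kernel_check = "true"
`
	var cfg storageConfig
	if _, err := toml.Decode(conf, &cfg); err != nil {
		panic(err)
	}
	// init() turns each option into a "<driver>.<key>=<value>" graph driver option.
	fmt.Printf("%s.size=%s\n", cfg.Storage.Driver, cfg.Storage.Options.Size)
}
```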
@@ -2337,7 +2400,12 @@ func init() { for _, s := range config.Storage.Options.AdditionalImageStores { DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) } - + if config.Storage.Options.Size != "" { + DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size)) + } + if config.Storage.Options.OverrideKernelCheck != "" { + DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.override_kernel_check=%s", config.Storage.Driver, config.Storage.Options.OverrideKernelCheck)) + } if os.Getenv("STORAGE_DRIVER") != "" { DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") } diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf index 81fbbadd..a30f8feb 100644 --- a/vendor/github.com/containers/storage/vendor.conf +++ b/vendor/github.com/containers/storage/vendor.conf @@ -1,11 +1,9 @@ github.com/BurntSushi/toml master github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165 github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8 -github.com/sirupsen/logrus v1.0.0 +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf -github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 -github.com/go-check/check 20d25e2804050c1cd24a7eea1e7a6447dd0e74ec github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 github.com/opencontainers/go-digest master @@ -13,8 +11,11 @@ github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 github.com/pkg/errors master +github.com/pmezard/go-difflib v1.0.0 +github.com/sirupsen/logrus v1.0.0 +github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 github.com/tchap/go-patricia v2.2.6 -github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721 -github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 -golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d -golang.org/x/sys d75a52659825e75fff6158388dddc6a5b04f9ba5 +github.com/vbatts/tar-split v0.10.2 +golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 +golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5 +github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac diff --git a/vendor/github.com/docker/docker/hack/README.md b/vendor/github.com/docker/docker/hack/README.md deleted file mode 100644 index 802395d5..00000000 --- a/vendor/github.com/docker/docker/hack/README.md +++ /dev/null @@ -1,60 +0,0 @@ -## About - -This directory contains a collection of scripts used to build and manage this -repository. If there are any issues regarding the intention of a particular -script (or even part of a certain script), please reach out to us. -It may help us either refine our current scripts, or add on new ones -that are appropriate for a given use case. - -## DinD (dind.sh) - -DinD is a wrapper script which allows Docker to be run inside a Docker -container. DinD requires the container to -be run with privileged mode enabled. 
- -## Generate Authors (generate-authors.sh) - -Generates AUTHORS; a file with all the names and corresponding emails of -individual contributors. AUTHORS can be found in the home directory of -this repository. - -## Make - -There are two make files, each with different extensions. Neither are supposed -to be called directly; only invoke `make`. Both scripts run inside a Docker -container. - -### make.ps1 - -- The Windows native build script that uses PowerShell semantics; it is limited -unlike `hack\make.sh` since it does not provide support for the full set of -operations provided by the Linux counterpart, `make.sh`. However, `make.ps1` -does provide support for local Windows development and Windows to Windows CI. -More information is found within `make.ps1` by the author, @jhowardmsft - -### make.sh - -- Referenced via `make test` when running tests on a local machine, -or directly referenced when running tests inside a Docker development container. -- When running on a local machine, `make test` to run all tests found in -`test`, `test-unit`, `test-integration-cli`, and `test-docker-py` on -your local machine. The default timeout is set in `make.sh` to 60 minutes -(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run -all of the tests. -- When running inside a Docker development container, `hack/make.sh` does -not have a single target that runs all the tests. You need to provide a -single command line with multiple targets that performs the same thing. -An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py` -- For more information related to testing outside the scope of this README, -refer to -[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/) - -## Release (release.sh) - -Releases any bundles built by `make` on a public AWS S3 bucket. -For information regarding configuration, please view `release.sh`. - -## Vendor (vendor.sh) - -A shell script that is a wrapper around Vndr. For information on how to use -this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md) diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md deleted file mode 100644 index 1cea5252..00000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Integration Testing on Swarm - -IT on Swarm allows you to execute integration test in parallel across a Docker Swarm cluster - -## Architecture - -### Master service - - - Works as a funker caller - - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`) - -### Worker service - - - Works as a funker callee - - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`) - -### Client - - - Controls master and workers via `docker stack` - - No need to have a local daemon - -Typically, the master and workers are supposed to be running on a cloud environment, -while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows. 
- -## Requirement - - - Docker daemon 1.13 or later - - Private registry for distributed execution with multiple nodes - -## Usage - -### Step 1: Prepare images - - $ make build-integration-cli-on-swarm - -Following environment variables are known to work in this step: - - - `BUILDFLAGS` - - `DOCKER_INCREMENTAL_BINARY` - -Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`. - -### Step 2: Execute tests - - $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest - -Following environment variables are known to work in this step: - - - `DOCKER_GRAPHDRIVER` - - `DOCKER_EXPERIMENTAL` - -#### Flags - -Basic flags: - - - `-replicas N`: the number of worker service replicas. i.e. degree of parallelism. - - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`. - - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`. - -Experimental flags for mitigating makespan nonuniformity: - - - `-shuffle`: Shuffle the test filter strings - -Flags for debugging IT on Swarm itself: - - - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default(0), the timestamp is used. - - `-filters-file FILE`: the file contains `-check.f` strings. By default, the file is automatically generated. - - `-dry-run`: skip the actual workload - - `keep-executor`: do not auto-remove executor containers, which is used for running privileged programs on Swarm diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf deleted file mode 100644 index efd6d6d0..00000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf +++ /dev/null @@ -1,2 +0,0 @@ -# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here -github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773 diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go new file mode 100644 index 00000000..012fe52a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -0,0 +1,23 @@ +// +build linux + +package homedir + +import ( + "os" + + "github.com/docker/docker/pkg/idtools" +) + +// GetStatic returns the home directory for the current user without calling +// os/user.Current(). This is useful for static-linked binary on glibc-based +// system, because a call to os/user.Current() in a static binary leads to +// segfault due to a glibc issue that won't be fixed in a short term. 
+// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +func GetStatic() (string, error) { + uid := os.Getuid() + usr, err := idtools.LookupUID(uid) + if err != nil { + return "", err + } + return usr.Home, nil +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go new file mode 100644 index 00000000..6b96b856 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -0,0 +1,13 @@ +// +build !linux + +package homedir + +import ( + "errors" +) + +// GetStatic is not needed for non-linux systems. +// (Precisely, it is needed only for glibc-based linux systems.) +func GetStatic() (string, error) { + return "", errors.New("homedir.GetStatic() is not supported on this system") +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go new file mode 100644 index 00000000..f2a20ea8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +package homedir + +import ( + "os" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "~" +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 00000000..fafdb2bb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 00000000..68a072db --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -0,0 +1,279 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. 
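Editor's note: the vendored homedir package above prefers `$HOME` and only falls back to a passwd lookup when it is unset. A stdlib-only sketch of the same fallback, with `os/user` standing in for the libcontainer lookup the vendored code uses:

```go
package main

import (
	"fmt"
	"os"
	"os/user"
)

// getHome mirrors homedir.Get on Unix: trust $HOME when set, otherwise
// fall back to the current user's passwd entry.
func getHome() string {
	if home := os.Getenv("HOME"); home != "" {
		return home
	}
	if u, err := user.Current(); err == nil {
		return u.HomeDir
	}
	return ""
}

func main() {
	fmt.Println(getHome())
}
```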
+type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +// Deprecated: Use MkdirAllAndChown +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +// Deprecated: Use MkdirAndChown with a IDPair +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, false) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. 
If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// IDPair is a UID and GID pair +type IDPair struct { + UID int + GID int +} + +// IDMappings contains a mappings of UIDs and GIDs +type IDMappings struct { + uids []IDMap + gids []IDMap +} + +// NewIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func NewIDMappings(username, groupname string) (*IDMappings, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, err + } + if len(subuidRanges) == 0 { + return nil, fmt.Errorf("No subuid ranges found for user %q", username) + } + if len(subgidRanges) == 0 { + return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + } + + return &IDMappings{ + uids: createIDMap(subuidRanges), + gids: createIDMap(subgidRanges), + }, nil +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { + return &IDMappings{uids: uids, gids: gids} +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IDMappings) RootPair() IDPair { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return IDPair{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid. 
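Editor's note: the translation in `toContainer`/`toHost` is plain offset arithmetic over contiguous `[HostID, HostID+Size)` ranges. A self-contained sketch, re-implementing the unexported helper for illustration:

```go
package main

import "fmt"

// IDMap matches the struct above: one contiguous remapping range.
type IDMap struct {
	ContainerID, HostID, Size int
}

// toHost mirrors the vendored helper: find the range containing the
// container ID and apply the same offset on the host side.
func toHost(contID int, idMap []IDMap) (int, error) {
	if idMap == nil {
		return contID, nil // no map: assume 1-to-1
	}
	for _, m := range idMap {
		if contID >= m.ContainerID && contID <= m.ContainerID+m.Size-1 {
			return m.HostID + (contID - m.ContainerID), nil
		}
	}
	return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}

func main() {
	maps := []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	fmt.Println(toHost(0, maps))     // 100000 <nil>: container root -> host 100000
	fmt.Println(toHost(1000, maps))  // 101000 <nil>
	fmt.Println(toHost(70000, maps)) // -1, error: outside every range
}
```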
+// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IDMappings) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) UIDs() []IDMap { + return i.uids +} + +// GIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) GIDs() []IDMap { + return i.gids +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username. 
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 00000000..8701bb7f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -0,0 +1,204 @@ +// +build !windows + +package idtools + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + // short-circuit--we were called with an existing directory and chown was requested + return os.Chown(path, ownerUID, ownerGID) + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair IDPair) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// 
LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 00000000..45d2878e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. 
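Editor's note: the `accessible` helper earlier in this file reduces `CanAccess` to three execute-bit tests. A small standalone check of that logic (the function body follows the vendored one):

```go
package main

import (
	"fmt"
	"os"
)

// accessible grants access if the caller owns the path (owner execute,
// 0100), shares its group (group execute, 0010), or the world-execute
// bit (0001) is set.
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
	if isOwner && perms&0100 == 0100 {
		return true
	}
	if isGroup && perms&0010 == 0010 {
		return true
	}
	return perms&0001 == 0001
}

func main() {
	fmt.Println(accessible(true, false, 0700))  // true: owner execute
	fmt.Println(accessible(false, true, 0750))  // true: group execute
	fmt.Println(accessible(false, false, 0750)) // false: no world execute
}
```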
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, pair IDPair) bool { + return true +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 00000000..9da7975e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. 
+func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + 
sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 00000000..d98b354c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go new file mode 100644 index 00000000..9703ecbd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) + return execCmd.CombinedOutput() +} diff --git a/cmd/kpod/formats/templates.go b/vendor/github.com/docker/docker/pkg/templates/templates.go similarity index 96% rename from cmd/kpod/formats/templates.go rename to vendor/github.com/docker/docker/pkg/templates/templates.go index c2582552..d2d7e0c3 100644 --- a/cmd/kpod/formats/templates.go +++ b/vendor/github.com/docker/docker/pkg/templates/templates.go @@ -1,4 +1,4 @@ -package formats +package templates import ( "bytes" @@ -14,7 +14,7 @@ var basicFunctions = template.FuncMap{ buf := &bytes.Buffer{} enc := json.NewEncoder(buf) enc.SetEscapeHTML(false) - _ = enc.Encode(v) + enc.Encode(v) // Remove the trailing new line added by the encoder return strings.TrimSpace(buf.String()) }, @@ -31,7 +31,7 @@ var basicFunctions = template.FuncMap{ // This is a replacement of basicFunctions for header generation // because we want the header to remain intact. // Some functions like `split` are irrelevant so not added. 
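Editor's note: the templates package being renamed here keeps a `json` helper that renders any value without HTML escaping and trims the encoder's trailing newline. A small usage sketch with text/template; the FuncMap entry mirrors the vendored one:

```go
package main

import (
	"bytes"
	"encoding/json"
	"os"
	"strings"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// Encode without HTML escaping, then trim the trailing newline
		// json.Encoder always appends.
		"json": func(v interface{}) string {
			buf := &bytes.Buffer{}
			enc := json.NewEncoder(buf)
			enc.SetEscapeHTML(false)
			enc.Encode(v)
			return strings.TrimSpace(buf.String())
		},
	}
	tmpl := template.Must(template.New("t").Funcs(funcs).Parse(`{{json .}}`))
	tmpl.Execute(os.Stdout, map[string]string{"a": "<b>"}) // {"a":"<b>"}
}
```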
-var headerFunctions = template.FuncMap{
+var HeaderFunctions = template.FuncMap{
 	"json": func(v string) string {
 		return v
 	},
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/LICENSE b/vendor/github.com/emicklei/go-restful-swagger12/LICENSE
deleted file mode 100644
index aeab5b44..00000000
--- a/vendor/github.com/emicklei/go-restful-swagger12/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2017 Ernest Micklei
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/README.md b/vendor/github.com/emicklei/go-restful-swagger12/README.md
deleted file mode 100644
index 037b9b09..00000000
--- a/vendor/github.com/emicklei/go-restful-swagger12/README.md
+++ /dev/null
@@ -1,83 +0,0 @@
-# go-restful-swagger12
-
-[![Build Status](https://travis-ci.org/emicklei/go-restful-swagger12.png)](https://travis-ci.org/emicklei/go-restful-swagger12)
-[![GoDoc](https://godoc.org/github.com/emicklei/go-restful-swagger12?status.svg)](https://godoc.org/github.com/emicklei/go-restful-swagger12)
-
-How to use Swagger UI with go-restful
-=
-
-Get the Swagger UI sources (version 1.2 only)
-
-	git clone https://github.com/wordnik/swagger-ui.git
-
-The project contains a "dist" folder.
-Its contents have all the Swagger UI files you need.
-
-The `index.html` has a `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
-You need to change that to match your WebService JSON endpoint, e.g. `http://localhost:8080/apidocs.json`
-
-Now, you can install the Swagger WebService for serving the Swagger specification in JSON.
-
-	config := swagger.Config{
-		WebServices:    restful.RegisteredWebServices(),
-		ApiPath:        "/apidocs.json",
-		SwaggerPath:     "/apidocs/",
-		SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
-	swagger.InstallSwaggerService(config)
-
-
-Documenting Structs
---
-
-Currently there are 2 ways to document your structs in the go-restful Swagger.
-
-###### By using struct tags
-- Use tag "description" to annotate a struct field with a description to show in the UI
-- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added to a field of the struct, and if there are multiple definitions, they will be appended, separated by an empty line.
-
-###### By using the SwaggerDoc method
-Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**.
- - type Address struct { - Country string `json:"country,omitempty"` - PostCode int `json:"postcode,omitempty"` - } - - func (Address) SwaggerDoc() map[string]string { - return map[string]string{ - "": "Address doc", - "country": "Country doc", - "postcode": "PostCode doc", - } - } - -This example will generate a JSON like this - - { - "Address": { - "id": "Address", - "description": "Address doc", - "properties": { - "country": { - "type": "string", - "description": "Country doc" - }, - "postcode": { - "type": "integer", - "format": "int32", - "description": "PostCode doc" - } - } - } - } - -**Very Important Notes:** -- `SwaggerDoc()` is using a **NON-Pointer** receiver (e.g. func (Address) and not func (*Address)) -- The returned map should use as key the name of the field as defined in the JSON parameter (e.g. `"postcode"` and not `"PostCode"`) - -Notes --- -- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..) -- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints. - -© 2017, ernestmicklei.com. MIT License. Contributions welcome. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go b/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go deleted file mode 100644 index 9f4c3690..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go +++ /dev/null @@ -1,64 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "encoding/json" -) - -// ApiDeclarationList maintains an ordered list of ApiDeclaration. 
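The swagger12 files deleted below lean on one recurring pattern: a slice-backed, ordered key/value list with a hand-rolled `MarshalJSON`, used instead of a plain Go map because `encoding/json` sorts map keys alphabetically, while the Swagger output here should preserve insertion order. A stripped-down sketch of the pattern, independent of the swagger types (all names are illustrative):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type namedValue struct {
	Name  string
	Value interface{}
}

// orderedList keeps entries in insertion order.
type orderedList struct {
	list []namedValue
}

// Put adds or replaces an entry by name, keeping its original position.
func (l *orderedList) Put(name string, v interface{}) {
	for i, each := range l.list {
		if each.Name == name {
			l.list[i] = namedValue{name, v}
			return
		}
	}
	l.list = append(l.list, namedValue{name, v})
}

// MarshalJSON renders the list as if it were a map[string]interface{}.
// Names are assumed not to need JSON escaping, as in the vendored code.
func (l orderedList) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	buf.WriteString("{\n")
	for i, each := range l.list {
		buf.WriteString(`"` + each.Name + `": `)
		if err := enc.Encode(each.Value); err != nil {
			return nil, err
		}
		if i < len(l.list)-1 {
			buf.WriteString(",\n")
		}
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}

func main() {
	var l orderedList
	l.Put("zebra", 1)
	l.Put("alpha", 2)
	out, _ := json.Marshal(l) // json.Marshal compacts the custom output
	fmt.Println(string(out))  // {"zebra":1,"alpha":2}; insertion order kept
}
```

The deleted ApiDeclarationList below, like ModelList and ModelPropertyList later in this diff, is an instance of this shape.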
-type ApiDeclarationList struct {
-	List []ApiDeclaration
-}
-
-// At returns the ApiDeclaration by its path; ok is false if absent
-func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
-	for _, each := range l.List {
-		if each.ResourcePath == path {
-			return each, true
-		}
-	}
-	return a, false
-}
-
-// Put adds or replaces an ApiDeclaration with this path
-func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
-	// maybe replace existing
-	for i, each := range l.List {
-		if each.ResourcePath == path {
-			// replace
-			l.List[i] = a
-			return
-		}
-	}
-	// add
-	l.List = append(l.List, a)
-}
-
-// Do enumerates all the declarations, each with its assigned path
-func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
-	for _, each := range l.List {
-		block(each.ResourcePath, each)
-	}
-}
-
-// MarshalJSON writes the ApiDeclarationList as if it was a map[string]ApiDeclaration
-func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
-	var buf bytes.Buffer
-	encoder := json.NewEncoder(&buf)
-	buf.WriteString("{\n")
-	for i, each := range l.List {
-		buf.WriteString("\"")
-		buf.WriteString(each.ResourcePath)
-		buf.WriteString("\": ")
-		encoder.Encode(each)
-		if i < len(l.List)-1 {
-			buf.WriteString(",\n")
-		}
-	}
-	buf.WriteString("}")
-	return buf.Bytes(), nil
-}
diff --git a/vendor/github.com/emicklei/go-restful-swagger12/config.go b/vendor/github.com/emicklei/go-restful-swagger12/config.go
deleted file mode 100644
index 18f8e57d..00000000
--- a/vendor/github.com/emicklei/go-restful-swagger12/config.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package swagger
-
-import (
-	"net/http"
-	"reflect"
-
-	"github.com/emicklei/go-restful"
-)
-
-// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
-type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)
-
-// MapSchemaFormatFunc can be used to modify typeName at definition time.
-type MapSchemaFormatFunc func(typeName string) string
-
-// MapModelTypeNameFunc can be used to return the desired typeName for a given
-// type. It will return false if the default name should be used.
-type MapModelTypeNameFunc func(t reflect.Type) (string, bool)
-
-type Config struct {
-	// url where the services are available, e.g. http://localhost:8080
-	// if left empty then the basePath of Swagger is taken from the actual request
-	WebServicesUrl string
-	// path where the JSON api is available, e.g. /apidocs
-	ApiPath string
-	// [optional] path where the swagger UI will be served, e.g. /swagger
-	SwaggerPath string
-	// [optional] location of folder containing Swagger HTML5 application index.html
-	SwaggerFilePath string
-	// api listing is constructed from this list of restful WebServices.
-	WebServices []*restful.WebService
-	// will serve all static content (scripts,pages,images)
-	StaticHandler http.Handler
-	// [optional] by default CORS (Cross-Origin-Resource-Sharing) is enabled.
-	DisableCORS bool
-	// Top-level API version. Is reflected in the resource listing.
-	ApiVersion string
-	// If set then call this handler after building the complete ApiDeclaration Map
-	PostBuildHandler PostBuildDeclarationMapFunc
-	// Swagger global info struct
-	Info Info
-	// [optional] If set, model builder should call this handler to get additional typename-to-swagger-format-field conversion.
-	SchemaFormatHandler MapSchemaFormatFunc
-	// [optional] If set, model builder should call this handler to retrieve the name for a given type.
- ModelTypeNameHandler MapModelTypeNameFunc -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go b/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go deleted file mode 100644 index d40786f2..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go +++ /dev/null @@ -1,467 +0,0 @@ -package swagger - -import ( - "encoding/json" - "reflect" - "strings" -) - -// ModelBuildable is used for extending Structs that need more control over -// how the Model appears in the Swagger api declaration. -type ModelBuildable interface { - PostBuildModel(m *Model) *Model -} - -type modelBuilder struct { - Models *ModelList - Config *Config -} - -type documentable interface { - SwaggerDoc() map[string]string -} - -// Check if this structure has a method with signature func () SwaggerDoc() map[string]string -// If it exists, retrive the documentation and overwrite all struct tag descriptions -func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string { - if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok { - return docable.SwaggerDoc() - } - return make(map[string]string) -} - -// addModelFrom creates and adds a Model to the builder and detects and calls -// the post build hook for customizations -func (b modelBuilder) addModelFrom(sample interface{}) { - if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil { - // allow customizations - if buildable, ok := sample.(ModelBuildable); ok { - modelOrNil = buildable.PostBuildModel(modelOrNil) - b.Models.Put(modelOrNil.Id, *modelOrNil) - } - } -} - -func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model { - // Turn pointers into simpler types so further checks are - // correct. - if st.Kind() == reflect.Ptr { - st = st.Elem() - } - - modelName := b.keyFrom(st) - if nameOverride != "" { - modelName = nameOverride - } - // no models needed for primitive types - if b.isPrimitiveType(modelName) { - return nil - } - // golang encoding/json packages says array and slice values encode as - // JSON arrays, except that []byte encodes as a base64-encoded string. - // If we see a []byte here, treat it at as a primitive type (string) - // and deal with it in buildArrayTypeProperty. 
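The comment above states a real `encoding/json` rule that is easy to forget: a `[]byte` marshals to a base64-encoded JSON string, not to an array of numbers, which is why the model builder maps it to the `string` schema type. A quick stdlib-only check (illustration, not part of the diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	bytesOut, _ := json.Marshal([]byte("hi"))
	intsOut, _ := json.Marshal([]int8{104, 105})
	fmt.Println(string(bytesOut)) // "aGk=" ([]byte becomes a base64 string)
	fmt.Println(string(intsOut))  // [104,105] (other numeric slices become arrays)
}
```

The guard below implements exactly that special case before the general slice handling.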
- if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) && - st.Elem().Kind() == reflect.Uint8 { - return nil - } - // see if we already have visited this model - if _, ok := b.Models.At(modelName); ok { - return nil - } - sm := Model{ - Id: modelName, - Required: []string{}, - Properties: ModelPropertyList{}} - - // reference the model before further initializing (enables recursive structs) - b.Models.Put(modelName, sm) - - // check for slice or array - if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { - b.addModel(st.Elem(), "") - return &sm - } - // check for structure or primitive type - if st.Kind() != reflect.Struct { - return &sm - } - - fullDoc := getDocFromMethodSwaggerDoc2(st) - modelDescriptions := []string{} - - for i := 0; i < st.NumField(); i++ { - field := st.Field(i) - jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName) - if len(modelDescription) > 0 { - modelDescriptions = append(modelDescriptions, modelDescription) - } - - // add if not omitted - if len(jsonName) != 0 { - // update description - if fieldDoc, ok := fullDoc[jsonName]; ok { - prop.Description = fieldDoc - } - // update Required - if b.isPropertyRequired(field) { - sm.Required = append(sm.Required, jsonName) - } - sm.Properties.Put(jsonName, prop) - } - } - - // We always overwrite documentation if SwaggerDoc method exists - // "" is special for documenting the struct itself - if modelDoc, ok := fullDoc[""]; ok { - sm.Description = modelDoc - } else if len(modelDescriptions) != 0 { - sm.Description = strings.Join(modelDescriptions, "\n") - } - - // update model builder with completed model - b.Models.Put(modelName, sm) - - return &sm -} - -func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool { - required := true - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if len(s) > 1 && s[1] == "omitempty" { - return false - } - } - return required -} - -func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) { - jsonName = b.jsonNameOfField(field) - if len(jsonName) == 0 { - // empty name signals skip property - return "", "", prop - } - - if field.Name == "XMLName" && field.Type.String() == "xml.Name" { - // property is metadata for the xml.Name attribute, can be skipped - return "", "", prop - } - - if tag := field.Tag.Get("modelDescription"); tag != "" { - modelDescription = tag - } - - prop.setPropertyMetadata(field) - if prop.Type != nil { - return jsonName, modelDescription, prop - } - fieldType := field.Type - - // check if type is doing its own marshalling - marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem() - if fieldType.Implements(marshalerType) { - var pType = "string" - if prop.Type == nil { - prop.Type = &pType - } - if prop.Format == "" { - prop.Format = b.jsonSchemaFormat(b.keyFrom(fieldType)) - } - return jsonName, modelDescription, prop - } - - // check if annotation says it is a string - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if len(s) > 1 && s[1] == "string" { - stringt := "string" - prop.Type = &stringt - return jsonName, modelDescription, prop - } - } - - fieldKind := fieldType.Kind() - switch { - case fieldKind == reflect.Struct: - jsonName, prop := b.buildStructTypeProperty(field, jsonName, model) - return jsonName, modelDescription, prop - case fieldKind == reflect.Slice || fieldKind == reflect.Array: - jsonName, prop := 
b.buildArrayTypeProperty(field, jsonName, modelName) - return jsonName, modelDescription, prop - case fieldKind == reflect.Ptr: - jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName) - return jsonName, modelDescription, prop - case fieldKind == reflect.String: - stringt := "string" - prop.Type = &stringt - return jsonName, modelDescription, prop - case fieldKind == reflect.Map: - // if it's a map, it's unstructured, and swagger 1.2 can't handle it - objectType := "object" - prop.Type = &objectType - return jsonName, modelDescription, prop - } - - fieldTypeName := b.keyFrom(fieldType) - if b.isPrimitiveType(fieldTypeName) { - mapped := b.jsonSchemaType(fieldTypeName) - prop.Type = &mapped - prop.Format = b.jsonSchemaFormat(fieldTypeName) - return jsonName, modelDescription, prop - } - modelType := b.keyFrom(fieldType) - prop.Ref = &modelType - - if fieldType.Name() == "" { // override type of anonymous structs - nestedTypeName := modelName + "." + jsonName - prop.Ref = &nestedTypeName - b.addModel(fieldType, nestedTypeName) - } - return jsonName, modelDescription, prop -} - -func hasNamedJSONTag(field reflect.StructField) bool { - parts := strings.Split(field.Tag.Get("json"), ",") - if len(parts) == 0 { - return false - } - for _, s := range parts[1:] { - if s == "inline" { - return false - } - } - return len(parts[0]) > 0 -} - -func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) { - prop.setPropertyMetadata(field) - // Check for type override in tag - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - // check for anonymous - if len(fieldType.Name()) == 0 { - // anonymous - anonType := model.Id + "." + jsonName - b.addModel(fieldType, anonType) - prop.Ref = &anonType - return jsonName, prop - } - - if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) { - // embedded struct - sub := modelBuilder{new(ModelList), b.Config} - sub.addModel(fieldType, "") - subKey := sub.keyFrom(fieldType) - // merge properties from sub - subModel, _ := sub.Models.At(subKey) - subModel.Properties.Do(func(k string, v ModelProperty) { - model.Properties.Put(k, v) - // if subModel says this property is required then include it - required := false - for _, each := range subModel.Required { - if k == each { - required = true - break - } - } - if required { - model.Required = append(model.Required, k) - } - }) - // add all new referenced models - sub.Models.Do(func(key string, sub Model) { - if key != subKey { - if _, ok := b.Models.At(key); !ok { - b.Models.Put(key, sub) - } - } - }) - // empty name signals skip property - return "", prop - } - // simple struct - b.addModel(fieldType, "") - var pType = b.keyFrom(fieldType) - prop.Ref = &pType - return jsonName, prop -} - -func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) { - // check for type override in tags - prop.setPropertyMetadata(field) - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - if fieldType.Elem().Kind() == reflect.Uint8 { - stringt := "string" - prop.Type = &stringt - return jsonName, prop - } - var pType = "array" - prop.Type = &pType - isPrimitive := b.isPrimitiveType(fieldType.Elem().Name()) - elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem()) - prop.Items = new(Item) - if isPrimitive { - mapped := b.jsonSchemaType(elemTypeName) - prop.Items.Type = 
&mapped - } else { - prop.Items.Ref = &elemTypeName - } - // add|overwrite model for element type - if fieldType.Elem().Kind() == reflect.Ptr { - fieldType = fieldType.Elem() - } - if !isPrimitive { - b.addModel(fieldType.Elem(), elemTypeName) - } - return jsonName, prop -} - -func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) { - prop.setPropertyMetadata(field) - // Check for type override in tags - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - - // override type of pointer to list-likes - if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array { - var pType = "array" - prop.Type = &pType - isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name()) - elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem()) - if isPrimitive { - primName := b.jsonSchemaType(elemName) - prop.Items = &Item{Ref: &primName} - } else { - prop.Items = &Item{Ref: &elemName} - } - if !isPrimitive { - // add|overwrite model for element type - b.addModel(fieldType.Elem().Elem(), elemName) - } - } else { - // non-array, pointer type - fieldTypeName := b.keyFrom(fieldType.Elem()) - var pType = b.jsonSchemaType(fieldTypeName) // no star, include pkg path - if b.isPrimitiveType(fieldTypeName) { - prop.Type = &pType - prop.Format = b.jsonSchemaFormat(fieldTypeName) - return jsonName, prop - } - prop.Ref = &pType - elemName := "" - if fieldType.Elem().Name() == "" { - elemName = modelName + "." + jsonName - prop.Ref = &elemName - } - b.addModel(fieldType.Elem(), elemName) - } - return jsonName, prop -} - -func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.Name() == "" { - return modelName + "." 
+ jsonName - } - return b.keyFrom(t) -} - -func (b modelBuilder) keyFrom(st reflect.Type) string { - key := st.String() - if b.Config != nil && b.Config.ModelTypeNameHandler != nil { - if name, ok := b.Config.ModelTypeNameHandler(st); ok { - key = name - } - } - if len(st.Name()) == 0 { // unnamed type - // Swagger UI has special meaning for [ - key = strings.Replace(key, "[]", "||", -1) - } - return key -} - -// see also https://golang.org/ref/spec#Numeric_types -func (b modelBuilder) isPrimitiveType(modelName string) bool { - if len(modelName) == 0 { - return false - } - return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName) -} - -// jsonNameOfField returns the name of the field as it should appear in JSON format -// An empty string indicates that this field is not part of the JSON representation -func (b modelBuilder) jsonNameOfField(field reflect.StructField) string { - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if s[0] == "-" { - // empty name signals skip property - return "" - } else if s[0] != "" { - return s[0] - } - } - return field.Name -} - -// see also http://json-schema.org/latest/json-schema-core.html#anchor8 -func (b modelBuilder) jsonSchemaType(modelName string) string { - schemaMap := map[string]string{ - "uint": "integer", - "uint8": "integer", - "uint16": "integer", - "uint32": "integer", - "uint64": "integer", - - "int": "integer", - "int8": "integer", - "int16": "integer", - "int32": "integer", - "int64": "integer", - - "byte": "integer", - "float64": "number", - "float32": "number", - "bool": "boolean", - "time.Time": "string", - } - mapped, ok := schemaMap[modelName] - if !ok { - return modelName // use as is (custom or struct) - } - return mapped -} - -func (b modelBuilder) jsonSchemaFormat(modelName string) string { - if b.Config != nil && b.Config.SchemaFormatHandler != nil { - if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" { - return mapped - } - } - schemaMap := map[string]string{ - "int": "int32", - "int32": "int32", - "int64": "int64", - "byte": "byte", - "uint": "integer", - "uint8": "byte", - "float64": "double", - "float32": "float", - "time.Time": "date-time", - "*time.Time": "date-time", - } - mapped, ok := schemaMap[modelName] - if !ok { - return "" // no format - } - return mapped -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_list.go b/vendor/github.com/emicklei/go-restful-swagger12/model_list.go deleted file mode 100644 index 9bb6cb67..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_list.go +++ /dev/null @@ -1,86 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -import ( - "bytes" - "encoding/json" -) - -// NamedModel associates a name with a Model (not using its Id) -type NamedModel struct { - Name string - Model Model -} - -// ModelList encapsulates a list of NamedModel (association) -type ModelList struct { - List []NamedModel -} - -// Put adds or replaces a Model by its name -func (l *ModelList) Put(name string, model Model) { - for i, each := range l.List { - if each.Name == name { - // replace - l.List[i] = NamedModel{name, model} - return - } - } - // add - l.List = append(l.List, NamedModel{name, model}) -} - -// At returns a Model by its name, ok is false if absent -func (l *ModelList) At(name string) (m Model, ok bool) { - for _, each := range l.List { - if each.Name == name { - return each.Model, true - } - } - return m, false -} - -// Do enumerates all the models, each with its assigned name -func (l *ModelList) Do(block func(name string, value Model)) { - for _, each := range l.List { - block(each.Name, each.Model) - } -} - -// MarshalJSON writes the ModelList as if it was a map[string]Model -func (l ModelList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.Name) - buf.WriteString("\": ") - encoder.Encode(each.Model) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -// UnmarshalJSON reads back a ModelList. This is an expensive operation. -func (l *ModelList) UnmarshalJSON(data []byte) error { - raw := map[string]interface{}{} - json.NewDecoder(bytes.NewReader(data)).Decode(&raw) - for k, v := range raw { - // produces JSON bytes for each value - data, err := json.Marshal(v) - if err != nil { - return err - } - var m Model - json.NewDecoder(bytes.NewReader(data)).Decode(&m) - l.Put(k, m) - } - return nil -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go deleted file mode 100644 index a433b6b7..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go +++ /dev/null @@ -1,81 +0,0 @@ -package swagger - -import ( - "reflect" - "strings" -) - -func (prop *ModelProperty) setDescription(field reflect.StructField) { - if tag := field.Tag.Get("description"); tag != "" { - prop.Description = tag - } -} - -func (prop *ModelProperty) setDefaultValue(field reflect.StructField) { - if tag := field.Tag.Get("default"); tag != "" { - prop.DefaultValue = Special(tag) - } -} - -func (prop *ModelProperty) setEnumValues(field reflect.StructField) { - // We use | to separate the enum values. This value is chosen - // since its unlikely to be useful in actual enumeration values. - if tag := field.Tag.Get("enum"); tag != "" { - prop.Enum = strings.Split(tag, "|") - } -} - -func (prop *ModelProperty) setMaximum(field reflect.StructField) { - if tag := field.Tag.Get("maximum"); tag != "" { - prop.Maximum = tag - } -} - -func (prop *ModelProperty) setType(field reflect.StructField) { - if tag := field.Tag.Get("type"); tag != "" { - // Check if the first two characters of the type tag are - // intended to emulate slice/array behaviour. 
- // - // If type is intended to be a slice/array then add the - // overriden type to the array item instead of the main property - if len(tag) > 2 && tag[0:2] == "[]" { - pType := "array" - prop.Type = &pType - prop.Items = new(Item) - - iType := tag[2:] - prop.Items.Type = &iType - return - } - - prop.Type = &tag - } -} - -func (prop *ModelProperty) setMinimum(field reflect.StructField) { - if tag := field.Tag.Get("minimum"); tag != "" { - prop.Minimum = tag - } -} - -func (prop *ModelProperty) setUniqueItems(field reflect.StructField) { - tag := field.Tag.Get("unique") - switch tag { - case "true": - v := true - prop.UniqueItems = &v - case "false": - v := false - prop.UniqueItems = &v - } -} - -func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) { - prop.setDescription(field) - prop.setEnumValues(field) - prop.setMinimum(field) - prop.setMaximum(field) - prop.setUniqueItems(field) - prop.setDefaultValue(field) - prop.setType(field) -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go deleted file mode 100644 index 3babb194..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go +++ /dev/null @@ -1,87 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "encoding/json" -) - -// NamedModelProperty associates a name to a ModelProperty -type NamedModelProperty struct { - Name string - Property ModelProperty -} - -// ModelPropertyList encapsulates a list of NamedModelProperty (association) -type ModelPropertyList struct { - List []NamedModelProperty -} - -// At returns the ModelPropety by its name unless absent, then ok is false -func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) { - for _, each := range l.List { - if each.Name == name { - return each.Property, true - } - } - return p, false -} - -// Put adds or replaces a ModelProperty with this name -func (l *ModelPropertyList) Put(name string, prop ModelProperty) { - // maybe replace existing - for i, each := range l.List { - if each.Name == name { - // replace - l.List[i] = NamedModelProperty{Name: name, Property: prop} - return - } - } - // add - l.List = append(l.List, NamedModelProperty{Name: name, Property: prop}) -} - -// Do enumerates all the properties, each with its assigned name -func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) { - for _, each := range l.List { - block(each.Name, each.Property) - } -} - -// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty -func (l ModelPropertyList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.Name) - buf.WriteString("\": ") - encoder.Encode(each.Property) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation. 
-func (l *ModelPropertyList) UnmarshalJSON(data []byte) error { - raw := map[string]interface{}{} - json.NewDecoder(bytes.NewReader(data)).Decode(&raw) - for k, v := range raw { - // produces JSON bytes for each value - data, err := json.Marshal(v) - if err != nil { - return err - } - var m ModelProperty - json.NewDecoder(bytes.NewReader(data)).Decode(&m) - l.Put(k, m) - } - return nil -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go b/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go deleted file mode 100644 index b33ccfbe..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go +++ /dev/null @@ -1,36 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import "github.com/emicklei/go-restful" - -type orderedRouteMap struct { - elements map[string][]restful.Route - keys []string -} - -func newOrderedRouteMap() *orderedRouteMap { - return &orderedRouteMap{ - elements: map[string][]restful.Route{}, - keys: []string{}, - } -} - -func (o *orderedRouteMap) Add(key string, route restful.Route) { - routes, ok := o.elements[key] - if ok { - routes = append(routes, route) - o.elements[key] = routes - return - } - o.elements[key] = []restful.Route{route} - o.keys = append(o.keys, key) -} - -func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) { - for _, k := range o.keys { - block(k, o.elements[k]) - } -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger.go deleted file mode 100644 index 9c40833e..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/swagger.go +++ /dev/null @@ -1,185 +0,0 @@ -// Package swagger implements the structures of the Swagger -// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md -package swagger - -const swaggerVersion = "1.2" - -// 4.3.3 Data Type Fields -type DataTypeFields struct { - Type *string `json:"type,omitempty"` // if Ref not used - Ref *string `json:"$ref,omitempty"` // if Type not used - Format string `json:"format,omitempty"` - DefaultValue Special `json:"defaultValue,omitempty"` - Enum []string `json:"enum,omitempty"` - Minimum string `json:"minimum,omitempty"` - Maximum string `json:"maximum,omitempty"` - Items *Item `json:"items,omitempty"` - UniqueItems *bool `json:"uniqueItems,omitempty"` -} - -type Special string - -// 4.3.4 Items Object -type Item struct { - Type *string `json:"type,omitempty"` - Ref *string `json:"$ref,omitempty"` - Format string `json:"format,omitempty"` -} - -// 5.1 Resource Listing -type ResourceListing struct { - SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2 - Apis []Resource `json:"apis"` - ApiVersion string `json:"apiVersion"` - Info Info `json:"info"` - Authorizations []Authorization `json:"authorizations,omitempty"` -} - -// 5.1.2 Resource Object -type Resource struct { - Path string `json:"path"` // relative or absolute, must start with / - Description string `json:"description"` -} - -// 5.1.3 Info Object -type Info struct { - Title string `json:"title"` - Description string `json:"description"` - TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"` - Contact string `json:"contact,omitempty"` - License string `json:"license,omitempty"` - LicenseUrl string `json:"licenseUrl,omitempty"` -} - -// 5.1.5 -type Authorization struct { - Type string `json:"type"` - PassAs 
string `json:"passAs"` - Keyname string `json:"keyname"` - Scopes []Scope `json:"scopes"` - GrantTypes []GrantType `json:"grandTypes"` -} - -// 5.1.6, 5.2.11 -type Scope struct { - // Required. The name of the scope. - Scope string `json:"scope"` - // Recommended. A short description of the scope. - Description string `json:"description"` -} - -// 5.1.7 -type GrantType struct { - Implicit Implicit `json:"implicit"` - AuthorizationCode AuthorizationCode `json:"authorization_code"` -} - -// 5.1.8 Implicit Object -type Implicit struct { - // Required. The login endpoint definition. - loginEndpoint LoginEndpoint `json:"loginEndpoint"` - // An optional alternative name to standard "access_token" OAuth2 parameter. - TokenName string `json:"tokenName"` -} - -// 5.1.9 Authorization Code Object -type AuthorizationCode struct { - TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"` - TokenEndpoint TokenEndpoint `json:"tokenEndpoint"` -} - -// 5.1.10 Login Endpoint Object -type LoginEndpoint struct { - // Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` -} - -// 5.1.11 Token Request Endpoint Object -type TokenRequestEndpoint struct { - // Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` - // An optional alternative name to standard "client_id" OAuth2 parameter. - ClientIdName string `json:"clientIdName"` - // An optional alternative name to the standard "client_secret" OAuth2 parameter. - ClientSecretName string `json:"clientSecretName"` -} - -// 5.1.12 Token Endpoint Object -type TokenEndpoint struct { - // Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` - // An optional alternative name to standard "access_token" OAuth2 parameter. 
- TokenName string `json:"tokenName"` -} - -// 5.2 API Declaration -type ApiDeclaration struct { - SwaggerVersion string `json:"swaggerVersion"` - ApiVersion string `json:"apiVersion"` - BasePath string `json:"basePath"` - ResourcePath string `json:"resourcePath"` // must start with / - Info Info `json:"info"` - Apis []Api `json:"apis,omitempty"` - Models ModelList `json:"models,omitempty"` - Produces []string `json:"produces,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Authorizations []Authorization `json:"authorizations,omitempty"` -} - -// 5.2.2 API Object -type Api struct { - Path string `json:"path"` // relative or absolute, must start with / - Description string `json:"description"` - Operations []Operation `json:"operations,omitempty"` -} - -// 5.2.3 Operation Object -type Operation struct { - DataTypeFields - Method string `json:"method"` - Summary string `json:"summary,omitempty"` - Notes string `json:"notes,omitempty"` - Nickname string `json:"nickname"` - Authorizations []Authorization `json:"authorizations,omitempty"` - Parameters []Parameter `json:"parameters"` - ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional - Produces []string `json:"produces,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Deprecated string `json:"deprecated,omitempty"` -} - -// 5.2.4 Parameter Object -type Parameter struct { - DataTypeFields - ParamType string `json:"paramType"` // path,query,body,header,form - Name string `json:"name"` - Description string `json:"description"` - Required bool `json:"required"` - AllowMultiple bool `json:"allowMultiple"` -} - -// 5.2.5 Response Message Object -type ResponseMessage struct { - Code int `json:"code"` - Message string `json:"message"` - ResponseModel string `json:"responseModel,omitempty"` -} - -// 5.2.6, 5.2.7 Models Object -type Model struct { - Id string `json:"id"` - Description string `json:"description,omitempty"` - Required []string `json:"required,omitempty"` - Properties ModelPropertyList `json:"properties"` - SubTypes []string `json:"subTypes,omitempty"` - Discriminator string `json:"discriminator,omitempty"` -} - -// 5.2.8 Properties Object -type ModelProperty struct { - DataTypeFields - Description string `json:"description,omitempty"` -} - -// 5.2.10 -type Authorizations map[string]Authorization diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go deleted file mode 100644 index 05a3c7e7..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go +++ /dev/null @@ -1,21 +0,0 @@ -package swagger - -type SwaggerBuilder struct { - SwaggerService -} - -func NewSwaggerBuilder(config Config) *SwaggerBuilder { - return &SwaggerBuilder{*newSwaggerService(config)} -} - -func (sb SwaggerBuilder) ProduceListing() ResourceListing { - return sb.SwaggerService.produceListing() -} - -func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration { - return sb.SwaggerService.produceAllDeclarations() -} - -func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) { - return sb.SwaggerService.produceDeclarations(route) -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go deleted file mode 100644 index d9062312..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go +++ /dev/null @@ -1,443 +0,0 @@ -package swagger - 
-import (
-	"fmt"
-
-	"github.com/emicklei/go-restful"
-	// "github.com/emicklei/hopwatch"
-	"net/http"
-	"reflect"
-	"sort"
-	"strings"
-
-	"github.com/emicklei/go-restful/log"
-)
-
-type SwaggerService struct {
-	config            Config
-	apiDeclarationMap *ApiDeclarationList
-}
-
-func newSwaggerService(config Config) *SwaggerService {
-	sws := &SwaggerService{
-		config:            config,
-		apiDeclarationMap: new(ApiDeclarationList)}
-
-	// Build all ApiDeclarations
-	for _, each := range config.WebServices {
-		rootPath := each.RootPath()
-		// skip the api service itself
-		if rootPath != config.ApiPath {
-			if rootPath == "" || rootPath == "/" {
-				// use routes
-				for _, route := range each.Routes() {
-					entry := staticPathFromRoute(route)
-					_, exists := sws.apiDeclarationMap.At(entry)
-					if !exists {
-						sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
-					}
-				}
-			} else { // use root path
-				sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
-			}
-		}
-	}
-
-	// if specified then call the PostBuildHandler
-	if config.PostBuildHandler != nil {
-		config.PostBuildHandler(sws.apiDeclarationMap)
-	}
-	return sws
-}
-
-// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
-var LogInfo = func(format string, v ...interface{}) {
-	// use the restful package-wide logger
-	log.Printf(format, v...)
-}
-
-// InstallSwaggerService adds the WebService that provides the API documentation of all services
-// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
-func InstallSwaggerService(aSwaggerConfig Config) {
-	RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
-}
-
-// RegisterSwaggerService adds the WebService that provides the API documentation of all services
-// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
-func RegisterSwaggerService(config Config, wsContainer *restful.Container) { - sws := newSwaggerService(config) - ws := new(restful.WebService) - ws.Path(config.ApiPath) - ws.Produces(restful.MIME_JSON) - if config.DisableCORS { - ws.Filter(enableCORS) - } - ws.Route(ws.GET("/").To(sws.getListing)) - ws.Route(ws.GET("/{a}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations)) - LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath) - wsContainer.Add(ws) - - // Check paths for UI serving - if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" { - swaggerPathSlash := config.SwaggerPath - // path must end with slash / - if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] { - LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)") - swaggerPathSlash += "/" - } - - LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath) - wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath)))) - - //if we define a custom static handler use it - } else if config.StaticHandler != nil && config.SwaggerPath != "" { - swaggerPathSlash := config.SwaggerPath - // path must end with slash / - if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] { - LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)") - swaggerPathSlash += "/" - - } - LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler) - wsContainer.Handle(swaggerPathSlash, config.StaticHandler) - - } else { - LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served") - } -} - -func staticPathFromRoute(r restful.Route) string { - static := r.Path - bracket := strings.Index(static, "{") - if bracket <= 1 { // result cannot be empty - return static - } - if bracket != -1 { - static = r.Path[:bracket] - } - if strings.HasSuffix(static, "/") { - return static[:len(static)-1] - } else { - return static - } -} - -func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" { - // prevent duplicate header - if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 { - resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin) - } - } - chain.ProcessFilter(req, resp) -} - -func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) { - listing := sws.produceListing() - resp.WriteAsJson(listing) -} - -func (sws SwaggerService) produceListing() ResourceListing { - listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info} - sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) { - ref := Resource{Path: k} - if len(v.Apis) > 0 { // use description of first (could still be empty) - ref.Description = v.Apis[0].Description - } - listing.Apis = append(listing.Apis, ref) - }) - return listing -} - -func (sws SwaggerService) getDeclarations(req *restful.Request, 
resp *restful.Response) { - decl, ok := sws.produceDeclarations(composeRootPath(req)) - if !ok { - resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found") - return - } - // unless WebServicesUrl is given - if len(sws.config.WebServicesUrl) == 0 { - // update base path from the actual request - // TODO how to detect https? assume http for now - var host string - // X-Forwarded-Host or Host or Request.Host - hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific? - if !ok || len(hostvalues) == 0 { - forwarded, ok := req.Request.Header["Host"] // without reverse-proxy - if !ok || len(forwarded) == 0 { - // fallback to Host field - host = req.Request.Host - } else { - host = forwarded[0] - } - } else { - host = hostvalues[0] - } - // inspect Referer for the scheme (http vs https) - scheme := "http" - if referer := req.Request.Header["Referer"]; len(referer) > 0 { - if strings.HasPrefix(referer[0], "https") { - scheme = "https" - } - } - decl.BasePath = fmt.Sprintf("%s://%s", scheme, host) - } - resp.WriteAsJson(decl) -} - -func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration { - decls := map[string]ApiDeclaration{} - sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) { - decls[k] = v - }) - return decls -} - -func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) { - decl, ok := sws.apiDeclarationMap.At(route) - if !ok { - return nil, false - } - decl.BasePath = sws.config.WebServicesUrl - return &decl, true -} - -// composeDeclaration uses all routes and parameters to create a ApiDeclaration -func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration { - decl := ApiDeclaration{ - SwaggerVersion: swaggerVersion, - BasePath: sws.config.WebServicesUrl, - ResourcePath: pathPrefix, - Models: ModelList{}, - ApiVersion: ws.Version()} - - // collect any path parameters - rootParams := []Parameter{} - for _, param := range ws.PathParameters() { - rootParams = append(rootParams, asSwaggerParameter(param.Data())) - } - // aggregate by path - pathToRoutes := newOrderedRouteMap() - for _, other := range ws.Routes() { - if strings.HasPrefix(other.Path, pathPrefix) { - if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' { - continue - } - pathToRoutes.Add(other.Path, other) - } - } - pathToRoutes.Do(func(path string, routes []restful.Route) { - api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()} - voidString := "void" - for _, route := range routes { - operation := Operation{ - Method: route.Method, - Summary: route.Doc, - Notes: route.Notes, - // Type gets overwritten if there is a write sample - DataTypeFields: DataTypeFields{Type: &voidString}, - Parameters: []Parameter{}, - Nickname: route.Operation, - ResponseMessages: composeResponseMessages(route, &decl, &sws.config)} - - operation.Consumes = route.Consumes - operation.Produces = route.Produces - - // share root params if any - for _, swparam := range rootParams { - operation.Parameters = append(operation.Parameters, swparam) - } - // route specific params - for _, param := range route.ParameterDocs { - operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data())) - } - - sws.addModelsFromRouteTo(&operation, route, &decl) - api.Operations = append(api.Operations, operation) - } - decl.Apis = append(decl.Apis, api) - }) - return decl -} - -func withoutWildcard(path string) string { - if 
strings.HasSuffix(path, ":*}") { - return path[0:len(path)-3] + "}" - } - return path -} - -// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them. -func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) { - if route.ResponseErrors == nil { - return messages - } - // sort by code - codes := sort.IntSlice{} - for code := range route.ResponseErrors { - codes = append(codes, code) - } - codes.Sort() - for _, code := range codes { - each := route.ResponseErrors[code] - message := ResponseMessage{ - Code: code, - Message: each.Message, - } - if each.Model != nil { - st := reflect.TypeOf(each.Model) - isCollection, st := detectCollectionType(st) - // collection cannot be in responsemodel - if !isCollection { - modelName := modelBuilder{}.keyFrom(st) - modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "") - message.ResponseModel = modelName - } - } - messages = append(messages, message) - } - return -} - -// addModelsFromRoute takes any read or write sample from the Route and creates a Swagger model from it. -func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) { - if route.ReadSample != nil { - sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models) - } - if route.WriteSample != nil { - sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models) - } -} - -func detectCollectionType(st reflect.Type) (bool, reflect.Type) { - isCollection := false - if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { - st = st.Elem() - isCollection = true - } else { - if st.Kind() == reflect.Ptr { - if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array { - st = st.Elem().Elem() - isCollection = true - } - } - } - return isCollection, st -} - -// addModelFromSample creates and adds (or overwrites) a Model from a sample resource -func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) { - mb := modelBuilder{Models: models, Config: &sws.config} - if isResponse { - sampleType, items := asDataType(sample, &sws.config) - operation.Type = sampleType - operation.Items = items - } - mb.addModelFrom(sample) -} - -func asSwaggerParameter(param restful.ParameterData) Parameter { - return Parameter{ - DataTypeFields: DataTypeFields{ - Type: ¶m.DataType, - Format: asFormat(param.DataType, param.DataFormat), - DefaultValue: Special(param.DefaultValue), - }, - Name: param.Name, - Description: param.Description, - ParamType: asParamType(param.Kind), - - Required: param.Required} -} - -// Between 1..7 path parameters is supported -func composeRootPath(req *restful.Request) string { - path := "/" + req.PathParameter("a") - b := req.PathParameter("b") - if b == "" { - return path - } - path = path + "/" + b - c := req.PathParameter("c") - if c == "" { - return path - } - path = path + "/" + c - d := req.PathParameter("d") - if d == "" { - return path - } - path = path + "/" + d - e := req.PathParameter("e") - if e == "" { - return path - } - path = path + "/" + e - f := req.PathParameter("f") - if f == "" { - return path - } - path = path + "/" + f - g := req.PathParameter("g") - if g == "" { - return path - } - return path + "/" + g -} - -func asFormat(dataType string, dataFormat string) string { - if dataFormat != "" { - return dataFormat - } - return "" // TODO -} - -func asParamType(kind int) string { - switch { - case kind 
== restful.PathParameterKind: - return "path" - case kind == restful.QueryParameterKind: - return "query" - case kind == restful.BodyParameterKind: - return "body" - case kind == restful.HeaderParameterKind: - return "header" - case kind == restful.FormParameterKind: - return "form" - } - return "" -} - -func asDataType(any interface{}, config *Config) (*string, *Item) { - // If it's not a collection, return the suggested model name - st := reflect.TypeOf(any) - isCollection, st := detectCollectionType(st) - modelName := modelBuilder{}.keyFrom(st) - // if it's not a collection we are done - if !isCollection { - return &modelName, nil - } - - // XXX: This is not very elegant - // We create an Item object referring to the given model - models := ModelList{} - mb := modelBuilder{Models: &models, Config: config} - mb.addModelFrom(any) - - elemTypeName := mb.getElementTypeName(modelName, "", st) - item := new(Item) - if mb.isPrimitiveType(elemTypeName) { - mapped := mb.jsonSchemaType(elemTypeName) - item.Type = &mapped - } else { - item.Ref = &elemTypeName - } - tmp := "array" - return &tmp, item -} diff --git a/vendor/github.com/exponent-io/jsonpath/LICENSE b/vendor/github.com/exponent-io/jsonpath/LICENSE deleted file mode 100644 index 54197725..00000000 --- a/vendor/github.com/exponent-io/jsonpath/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Exponent Labs LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/exponent-io/jsonpath/README.md b/vendor/github.com/exponent-io/jsonpath/README.md deleted file mode 100644 index 382fb313..00000000 --- a/vendor/github.com/exponent-io/jsonpath/README.md +++ /dev/null @@ -1,66 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath) -[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath) - -# jsonpath - -This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder places where a json.Decoder would have been used. - -This Decoder has the following enhancements... - * The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions). 
- * The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path. - * The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token. - * The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string. - -## Installation - - go get -u github.com/exponent-io/jsonpath - -## Example Usage - -#### SeekTo - -```go -import "github.com/exponent-io/jsonpath" - -var j = []byte(`[ - {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}}, - {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}} -]`) - -w := json.NewDecoder(bytes.NewReader(j)) -var v interface{} - -w.SeekTo(1, "Point", "G") -w.Decode(&v) // v is 218 -``` - -#### Scan with PathActions - -```go -var j = []byte(`{"colors":[ - {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}}, - {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}} -]}`) - -var actions PathActions - -// Extract the value at Point.A -actions.Add(func(d *Decoder) error { - var alpha int - err := d.Decode(&alpha) - fmt.Printf("Alpha: %v\n", alpha) - return err -}, "Point", "A") - -w := NewDecoder(bytes.NewReader(j)) -w.SeekTo("colors", 0) - -var ok = true -var err error -for ok { - ok, err = w.Scan(&actions) - if err != nil && err != io.EOF { - panic(err) - } -} -``` diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go deleted file mode 100644 index 31de46c7..00000000 --- a/vendor/github.com/exponent-io/jsonpath/decoder.go +++ /dev/null @@ -1,210 +0,0 @@ -package jsonpath - -import ( - "encoding/json" - "io" -) - -// KeyString is returned from Decoder.Token to represent each key in a JSON object value. -type KeyString string - -// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens. -type Decoder struct { - json.Decoder - - path JsonPath - context jsonContext -} - -// NewDecoder creates a new instance of the extended JSON Decoder. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{Decoder: *json.NewDecoder(r)} -} - -// SeekTo causes the Decoder to move forward to a given path in the JSON structure. -// -// The path argument must consist of strings or integers. Each string specifies an JSON object key, and -// each integer specifies an index into a JSON array. -// -// Consider the JSON structure -// -// { "a": [0,"s",12e4,{"b":0,"v":35} ] } -// -// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object, -// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v". -// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35. -// -// SeekTo returns a boolean value indicating whether a match was found. -// -// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only. 
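A side note before the implementation: the README example above constructs the decoder with `json.NewDecoder`, but `SeekTo` is defined on this package's extended `Decoder`, so a compiling version needs `jsonpath.NewDecoder`. A hedged sketch against the upstream package (which this diff removes from vendor/):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/exponent-io/jsonpath"
)

func main() {
	j := []byte(`[
		{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
		{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
	]`)

	// SeekTo lives on the extended decoder, so construct it with
	// jsonpath.NewDecoder rather than encoding/json's NewDecoder.
	d := jsonpath.NewDecoder(bytes.NewReader(j))
	if ok, err := d.SeekTo(1, "Point", "G"); err != nil || !ok {
		panic(fmt.Sprintf("seek failed: ok=%v err=%v", ok, err))
	}
	var v interface{}
	if err := d.Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println(v) // 218
}
```

The deleted implementations of SeekTo and Decode follow.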
-func (d *Decoder) SeekTo(path ...interface{}) (bool, error) { - - if len(path) == 0 { - return len(d.path) == 0, nil - } - last := len(path) - 1 - if i, ok := path[last].(int); ok { - path[last] = i - 1 - } - - for { - if d.path.Equal(path) { - return true, nil - } - _, err := d.Token() - if err == io.EOF { - return false, nil - } else if err != nil { - return false, err - } - } -} - -// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is -// equivalent to encoding/json.Decode(). -func (d *Decoder) Decode(v interface{}) error { - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - return d.Decoder.Decode(v) -} - -// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the -// position of the most-recently parsed token. -func (d *Decoder) Path() JsonPath { - p := make(JsonPath, len(d.path)) - copy(p, d.path) - return p -} - -// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes -// between strings that are keys and and strings that are values. String tokens that are object keys are returned as a -// KeyString rather than as a native string. -func (d *Decoder) Token() (json.Token, error) { - t, err := d.Decoder.Token() - if err != nil { - return t, err - } - - if t == nil { - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - return t, err - } - - switch t := t.(type) { - case json.Delim: - switch t { - case json.Delim('{'): - if d.context == arrValue { - d.path.incTop() - } - d.path.push("") - d.context = objKey - break - case json.Delim('}'): - d.path.pop() - d.context = d.path.inferContext() - break - case json.Delim('['): - if d.context == arrValue { - d.path.incTop() - } - d.path.push(-1) - d.context = arrValue - break - case json.Delim(']'): - d.path.pop() - d.context = d.path.inferContext() - break - } - case float64, json.Number, bool: - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - break - case string: - switch d.context { - case objKey: - d.path.nameTop(t) - d.context = objValue - return KeyString(t), err - case objValue: - d.context = objKey - case arrValue: - d.path.incTop() - } - break - } - - return t, err -} - -// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array) -// invoking each matching PathAction along the way. -// -// Scan returns true if there are more contiguous values to scan (for example in an array). -func (d *Decoder) Scan(ext *PathActions) (bool, error) { - - rootPath := d.Path() - - // If this is an array path, increment the root path in our local copy. 
- if rootPath.inferContext() == arrValue { - rootPath.incTop() - } - - for { - // advance the token position - _, err := d.Token() - if err != nil { - return false, err - } - - match: - var relPath JsonPath - - // capture the new JSON path - path := d.Path() - - if len(path) > len(rootPath) { - // capture the path relative to where the scan started - relPath = path[len(rootPath):] - } else { - // if the path is not longer than the root, then we are done with this scan - // return boolean flag indicating if there are more items to scan at the same level - return d.Decoder.More(), nil - } - - // match the relative path against the path actions - if node := ext.node.match(relPath); node != nil { - if node.action != nil { - // we have a match so execute the action - err = node.action(d) - if err != nil { - return d.Decoder.More(), err - } - // The action may have advanced the decoder. If we are in an array, advancing it further would - // skip tokens. So, if we are scanning an array, jump to the top without advancing the token. - if d.path.inferContext() == arrValue && d.Decoder.More() { - goto match - } - } - } - } -} diff --git a/vendor/github.com/exponent-io/jsonpath/path.go b/vendor/github.com/exponent-io/jsonpath/path.go deleted file mode 100644 index d7db2ad3..00000000 --- a/vendor/github.com/exponent-io/jsonpath/path.go +++ /dev/null @@ -1,67 +0,0 @@ -// Extends the Go runtime's json.Decoder enabling navigation of a stream of json tokens. -package jsonpath - -import "fmt" - -type jsonContext int - -const ( - none jsonContext = iota - objKey - objValue - arrValue -) - -// AnyIndex can be used in a pattern to match any array index. -const AnyIndex = -2 - -// JsonPath is a slice of strings and/or integers. Each string specifies an JSON object key, and -// each integer specifies an index into a JSON array. -type JsonPath []interface{} - -func (p *JsonPath) push(n interface{}) { *p = append(*p, n) } -func (p *JsonPath) pop() { *p = (*p)[:len(*p)-1] } - -// increment the index at the top of the stack (must be an array index) -func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 } - -// name the key at the top of the stack (must be an object key) -func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n } - -// infer the context from the item at the top of the stack -func (p *JsonPath) inferContext() jsonContext { - if len(*p) == 0 { - return none - } - t := (*p)[len(*p)-1] - switch t.(type) { - case string: - return objKey - case int: - return arrValue - default: - panic(fmt.Sprintf("Invalid stack type %T", t)) - } -} - -// Equal tests for equality between two JsonPath types. -func (p *JsonPath) Equal(o JsonPath) bool { - if len(*p) != len(o) { - return false - } - for i, v := range *p { - if v != o[i] { - return false - } - } - return true -} - -func (p *JsonPath) HasPrefix(o JsonPath) bool { - for i, v := range o { - if v != (*p)[i] { - return false - } - } - return true -} diff --git a/vendor/github.com/exponent-io/jsonpath/pathaction.go b/vendor/github.com/exponent-io/jsonpath/pathaction.go deleted file mode 100644 index 497ed686..00000000 --- a/vendor/github.com/exponent-io/jsonpath/pathaction.go +++ /dev/null @@ -1,61 +0,0 @@ -package jsonpath - -// pathNode is used to construct a trie of paths to be matched -type pathNode struct { - matchOn interface{} // string, or integer - childNodes []pathNode - action DecodeAction -} - -// match climbs the trie to find a node that matches the given JSON path. 
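Editor's note: before the trie-matching internals that follow, here is how the `PathActions`/`Scan` pair is driven from calling code. This mirrors the package README quoted earlier and should be read as a sketch rather than canonical usage.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/exponent-io/jsonpath"
)

func main() {
	j := []byte(`{"colors":[
		{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
		{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
	]}`)

	var actions jsonpath.PathActions
	// Decode the value at the relative path Point.A of every scanned element.
	actions.Add(func(d *jsonpath.Decoder) error {
		var alpha int
		if err := d.Decode(&alpha); err != nil {
			return err
		}
		fmt.Println("Alpha:", alpha)
		return nil
	}, "Point", "A")

	d := jsonpath.NewDecoder(bytes.NewReader(j))
	d.SeekTo("colors", 0)

	// Scan reports whether more contiguous values remain at this level.
	for ok := true; ok; {
		var err error
		if ok, err = d.Scan(&actions); err != nil && err != io.EOF {
			panic(err)
		}
	}
}
```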
-func (n *pathNode) match(path JsonPath) *pathNode { - var node *pathNode = n - for _, ps := range path { - found := false - for i, n := range node.childNodes { - if n.matchOn == ps { - node = &node.childNodes[i] - found = true - break - } else if _, ok := ps.(int); ok && n.matchOn == AnyIndex { - node = &node.childNodes[i] - found = true - break - } - } - if !found { - return nil - } - } - return node -} - -// PathActions represents a collection of DecodeAction functions that should be called at certain path positions -// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams. -type PathActions struct { - node pathNode -} - -// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail. -type DecodeAction func(d *Decoder) error - -// Add specifies an action to call on the Decoder when the specified path is encountered. -func (je *PathActions) Add(action DecodeAction, path ...interface{}) { - - var node *pathNode = &je.node - for _, ps := range path { - found := false - for i, n := range node.childNodes { - if n.matchOn == ps { - node = &node.childNodes[i] - found = true - break - } - } - if !found { - node.childNodes = append(node.childNodes, pathNode{matchOn: ps}) - node = &node.childNodes[len(node.childNodes)-1] - } - } - node.action = action -} diff --git a/vendor/github.com/fatih/camelcase/LICENSE.md b/vendor/github.com/fatih/camelcase/LICENSE.md deleted file mode 100644 index aa4a536c..00000000 --- a/vendor/github.com/fatih/camelcase/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/camelcase/README.md b/vendor/github.com/fatih/camelcase/README.md deleted file mode 100644 index 105a6ae3..00000000 --- a/vendor/github.com/fatih/camelcase/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# CamelCase [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/camelcase) [![Build Status](http://img.shields.io/travis/fatih/camelcase.svg?style=flat-square)](https://travis-ci.org/fatih/camelcase) - -CamelCase is a Golang (Go) package to split the words of a camelcase type -string into a slice of words. It can be used to convert a camelcase word (lower -or upper case) into any type of word. - -## Splitting rules: - -1. If string is not valid UTF-8, return it without splitting as - single item array. -2. 
Assign all unicode characters into one of 4 sets: lower case - letters, upper case letters, numbers, and all other characters. -3. Iterate through characters of string, introducing splits - between adjacent characters that belong to different sets. -4. Iterate through array of split strings, and if a given string - is upper case: - * if subsequent string is lower case: - * move last character of upper case string to beginning of - lower case string - -## Install - -```bash -go get github.com/fatih/camelcase -``` - -## Usage and examples - -```go -splitted := camelcase.Split("GolangPackage") - -fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package" -``` - -Both lower camel case and upper camel case are supported. For more info please -check: [http://en.wikipedia.org/wiki/CamelCase](http://en.wikipedia.org/wiki/CamelCase) - -Below are some example cases: - -``` -"" => [] -"lowercase" => ["lowercase"] -"Class" => ["Class"] -"MyClass" => ["My", "Class"] -"MyC" => ["My", "C"] -"HTML" => ["HTML"] -"PDFLoader" => ["PDF", "Loader"] -"AString" => ["A", "String"] -"SimpleXMLParser" => ["Simple", "XML", "Parser"] -"vimRPCPlugin" => ["vim", "RPC", "Plugin"] -"GL11Version" => ["GL", "11", "Version"] -"99Bottles" => ["99", "Bottles"] -"May5" => ["May", "5"] -"BFG9000" => ["BFG", "9000"] -"BöseÜberraschung" => ["Böse", "Überraschung"] -"Two spaces" => ["Two", " ", "spaces"] -"BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] -``` diff --git a/vendor/github.com/fatih/camelcase/camelcase.go b/vendor/github.com/fatih/camelcase/camelcase.go deleted file mode 100644 index 02160c9a..00000000 --- a/vendor/github.com/fatih/camelcase/camelcase.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package camelcase is a micro package to split the words of a camelcase type -// string into a slice of words. -package camelcase - -import ( - "unicode" - "unicode/utf8" -) - -// Split splits the camelcase word and returns a list of words. It also -// supports digits. Both lower camel case and upper camel case are supported. -// For more info please check: http://en.wikipedia.org/wiki/CamelCase -// -// Examples -// -// "" => [""] -// "lowercase" => ["lowercase"] -// "Class" => ["Class"] -// "MyClass" => ["My", "Class"] -// "MyC" => ["My", "C"] -// "HTML" => ["HTML"] -// "PDFLoader" => ["PDF", "Loader"] -// "AString" => ["A", "String"] -// "SimpleXMLParser" => ["Simple", "XML", "Parser"] -// "vimRPCPlugin" => ["vim", "RPC", "Plugin"] -// "GL11Version" => ["GL", "11", "Version"] -// "99Bottles" => ["99", "Bottles"] -// "May5" => ["May", "5"] -// "BFG9000" => ["BFG", "9000"] -// "BöseÜberraschung" => ["Böse", "Überraschung"] -// "Two spaces" => ["Two", " ", "spaces"] -// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] -// -// Splitting rules -// -// 1) If string is not valid UTF-8, return it without splitting as -// single item array. -// 2) Assign all unicode characters into one of 4 sets: lower case -// letters, upper case letters, numbers, and all other characters. -// 3) Iterate through characters of string, introducing splits -// between adjacent characters that belong to different sets. 
-// 4) Iterate through array of split strings, and if a given string -// is upper case: -// if subsequent string is lower case: -// move last character of upper case string to beginning of -// lower case string -func Split(src string) (entries []string) { - // don't split invalid utf8 - if !utf8.ValidString(src) { - return []string{src} - } - entries = []string{} - var runes [][]rune - lastClass := 0 - class := 0 - // split into fields based on class of unicode character - for _, r := range src { - switch true { - case unicode.IsLower(r): - class = 1 - case unicode.IsUpper(r): - class = 2 - case unicode.IsDigit(r): - class = 3 - default: - class = 4 - } - if class == lastClass { - runes[len(runes)-1] = append(runes[len(runes)-1], r) - } else { - runes = append(runes, []rune{r}) - } - lastClass = class - } - // handle upper case -> lower case sequences, e.g. - // "PDFL", "oader" -> "PDF", "Loader" - for i := 0; i < len(runes)-1; i++ { - if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) { - runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...) - runes[i] = runes[i][:len(runes[i])-1] - } - } - // construct []string from results - for _, s := range runes { - if len(s) > 0 { - entries = append(entries, string(s)) - } - } - return -} diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md deleted file mode 100644 index d675c49d..00000000 --- a/vendor/github.com/go-openapi/analysis/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# OpenAPI initiative analysis [![Build Status](https://ci.vmware.run/api/badges/go-openapi/analysis/status.svg)](https://ci.vmware.run/go-openapi/analysis) [![Coverage](https://coverage.vmware.run/badges/go-openapi/analysis/coverage.svg)](https://coverage.vmware.run/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis) - - -A foundational library to analyze an OAI specification document for easier reasoning about the content. \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go deleted file mode 100644 index d388db3a..00000000 --- a/vendor/github.com/go-openapi/analysis/analyzer.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
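Editor's note: closing out the `camelcase` package removed above, its entire public surface is the single `Split` function. A minimal sketch, with expected output taken from the package's own example table:

```go
package main

import (
	"fmt"

	"github.com/fatih/camelcase"
)

func main() {
	fmt.Println(camelcase.Split("SimpleXMLParser")) // [Simple XML Parser]
	fmt.Println(camelcase.Split("GL11Version"))     // [GL 11 Version]
	fmt.Println(camelcase.Split("BFG9000"))         // [BFG 9000]
}
```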
- -package analysis - -import ( - "fmt" - slashpath "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -type referenceAnalysis struct { - schemas map[string]spec.Ref - responses map[string]spec.Ref - parameters map[string]spec.Ref - items map[string]spec.Ref - allRefs map[string]spec.Ref - referenced struct { - schemas map[string]SchemaRef - responses map[string]*spec.Response - parameters map[string]*spec.Parameter - } -} - -func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { - r.allRefs["#"+key] = ref -} - -func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items) { - r.items["#"+key] = items.Ref - r.addRef(key, items.Ref) -} - -func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { - r.schemas["#"+key] = ref.Schema.Ref - r.addRef(key, ref.Schema.Ref) -} - -func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { - r.responses["#"+key] = resp.Ref - r.addRef(key, resp.Ref) -} - -func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { - r.parameters["#"+key] = param.Ref - r.addRef(key, param.Ref) -} - -// New takes a swagger spec object and returns an analyzed spec document. -// The analyzed document contains a number of indices that make it easier to -// reason about semantics of a swagger specification for use in code generation -// or validation etc. -func New(doc *spec.Swagger) *Spec { - a := &Spec{ - spec: doc, - consumes: make(map[string]struct{}, 150), - produces: make(map[string]struct{}, 150), - authSchemes: make(map[string]struct{}, 150), - operations: make(map[string]map[string]*spec.Operation, 150), - allSchemas: make(map[string]SchemaRef, 150), - allOfs: make(map[string]SchemaRef, 150), - references: referenceAnalysis{ - schemas: make(map[string]spec.Ref, 150), - responses: make(map[string]spec.Ref, 150), - parameters: make(map[string]spec.Ref, 150), - items: make(map[string]spec.Ref, 150), - allRefs: make(map[string]spec.Ref, 150), - }, - } - a.references.referenced.schemas = make(map[string]SchemaRef, 150) - a.references.referenced.responses = make(map[string]*spec.Response, 150) - a.references.referenced.parameters = make(map[string]*spec.Parameter, 150) - a.initialize() - return a -} - -// Spec takes a swagger spec object and turns it into a registry -// with a bunch of utility methods to act on the information in the spec -type Spec struct { - spec *spec.Swagger - consumes map[string]struct{} - produces map[string]struct{} - authSchemes map[string]struct{} - operations map[string]map[string]*spec.Operation - references referenceAnalysis - allSchemas map[string]SchemaRef - allOfs map[string]SchemaRef -} - -func (s *Spec) initialize() { - for _, c := range s.spec.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range s.spec.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range s.spec.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - for path, pathItem := range s.AllPaths() { - s.analyzeOperations(path, &pathItem) - } - - for name, parameter := range s.spec.Parameters { - refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) - if parameter.Items != nil { - s.analyzeItems("items", parameter.Items, refPref) - } - if parameter.In == "body" && parameter.Schema != nil { - s.analyzeSchema("schema", *parameter.Schema, refPref) - } - } - - for name, response := range s.spec.Responses { - refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) - for _, 
v := range response.Headers { - if v.Items != nil { - s.analyzeItems("items", v.Items, refPref) - } - } - if response.Schema != nil { - s.analyzeSchema("schema", *response.Schema, refPref) - } - } - - for name, schema := range s.spec.Definitions { - s.analyzeSchema(name, schema, "/definitions") - } - // TODO: after analyzing all things and flattening schemas etc - // resolve all the collected references to their final representations - // best put in a separate method because this could get expensive -} - -func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { - // TODO: resolve refs here? - op := pi - s.analyzeOperation("GET", path, op.Get) - s.analyzeOperation("PUT", path, op.Put) - s.analyzeOperation("POST", path, op.Post) - s.analyzeOperation("PATCH", path, op.Patch) - s.analyzeOperation("DELETE", path, op.Delete) - s.analyzeOperation("HEAD", path, op.Head) - s.analyzeOperation("OPTIONS", path, op.Options) - for i, param := range op.Parameters { - refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) - } - if param.Items != nil { - s.analyzeItems("items", param.Items, refPref) - } - if param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } -} - -func (s *Spec) analyzeItems(name string, items *spec.Items, prefix string) { - if items == nil { - return - } - refPref := slashpath.Join(prefix, name) - s.analyzeItems(name, items.Items, refPref) - if items.Ref.String() != "" { - s.references.addItemsRef(refPref, items) - } -} - -func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { - if op == nil { - return - } - - for _, c := range op.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range op.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range op.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - if _, ok := s.operations[method]; !ok { - s.operations[method] = make(map[string]*spec.Operation) - } - s.operations[method][path] = op - prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) - for i, param := range op.Parameters { - refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) - } - s.analyzeItems("items", param.Items, refPref) - if param.In == "body" && param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } - if op.Responses != nil { - if op.Responses.Default != nil { - refPref := slashpath.Join(prefix, "responses", "default") - if op.Responses.Default.Ref.String() != "" { - s.references.addResponseRef(refPref, op.Responses.Default) - } - for _, v := range op.Responses.Default.Headers { - s.analyzeItems("items", v.Items, refPref) - } - if op.Responses.Default.Schema != nil { - s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref) - } - } - for k, res := range op.Responses.StatusCodeResponses { - refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, &res) - } - for _, v := range res.Headers { - s.analyzeItems("items", v.Items, refPref) - } - if res.Schema != nil { - s.analyzeSchema("schema", *res.Schema, refPref) - } - } - } -} - -func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) { - refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) - schRef := SchemaRef{ - Name: name, - Schema: &schema, - Ref: 
spec.MustCreateRef("#" + refURI), - } - s.allSchemas["#"+refURI] = schRef - if schema.Ref.String() != "" { - s.references.addSchemaRef(refURI, schRef) - } - for k, v := range schema.Definitions { - s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions")) - } - for k, v := range schema.Properties { - s.analyzeSchema(k, v, slashpath.Join(refURI, "properties")) - } - for k, v := range schema.PatternProperties { - s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties")) - } - for i, v := range schema.AllOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) - } - if len(schema.AllOf) > 0 { - s.allOfs["#"+refURI] = SchemaRef{Name: name, Schema: &schema, Ref: spec.MustCreateRef("#" + refURI)} - } - for i, v := range schema.AnyOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) - } - for i, v := range schema.OneOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) - } - if schema.Not != nil { - s.analyzeSchema("not", *schema.Not, refURI) - } - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI) - } - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI) - } - if schema.Items != nil { - if schema.Items.Schema != nil { - s.analyzeSchema("items", *schema.Items.Schema, refURI) - } - for i, sch := range schema.Items.Schemas { - s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) - } - } -} - -// SecurityRequirement is a representation of a security requirement for an operation -type SecurityRequirement struct { - Name string - Scopes []string -} - -// SecurityRequirementsFor gets the security requirements for the operation -func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement { - if s.spec.Security == nil && operation.Security == nil { - return nil - } - - schemes := s.spec.Security - if operation.Security != nil { - schemes = operation.Security - } - - unique := make(map[string]SecurityRequirement) - for _, scheme := range schemes { - for k, v := range scheme { - if _, ok := unique[k]; !ok { - unique[k] = SecurityRequirement{Name: k, Scopes: v} - } - } - } - - var result []SecurityRequirement - for _, v := range unique { - result = append(result, v) - } - return result -} - -// SecurityDefinitionsFor gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { - requirements := s.SecurityRequirementsFor(operation) - if len(requirements) == 0 { - return nil - } - result := make(map[string]spec.SecurityScheme) - for _, v := range requirements { - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - return result -} - -// ConsumesFor gets the mediatypes for the operation -func (s *Spec) ConsumesFor(operation *spec.Operation) []string { - - if len(operation.Consumes) == 0 { - cons := make(map[string]struct{}, len(s.spec.Consumes)) - for _, k := range s.spec.Consumes { - cons[k] = struct{}{} - } - return s.structMapKeys(cons) - } - - cons := make(map[string]struct{}, len(operation.Consumes)) - for _, c := range operation.Consumes { - cons[c] = struct{}{} - } - return s.structMapKeys(cons) -} - -// ProducesFor gets the mediatypes for the operation -func (s *Spec) 
ProducesFor(operation *spec.Operation) []string { - if len(operation.Produces) == 0 { - prod := make(map[string]struct{}, len(s.spec.Produces)) - for _, k := range s.spec.Produces { - prod[k] = struct{}{} - } - return s.structMapKeys(prod) - } - - prod := make(map[string]struct{}, len(operation.Produces)) - for _, c := range operation.Produces { - prod[c] = struct{}{} - } - return s.structMapKeys(prod) -} - -func mapKeyFromParam(param *spec.Parameter) string { - return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) -} - -func fieldNameFromParam(param *spec.Parameter) string { - if nm, ok := param.Extensions.GetString("go-name"); ok { - return nm - } - return swag.ToGoName(param.Name) -} - -func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) { - for _, param := range parameters { - pr := param - if pr.Ref.String() != "" { - obj, _, err := pr.Ref.GetPointer().Get(s.spec) - if err != nil { - panic(err) - } - pr = obj.(spec.Parameter) - } - res[mapKeyFromParam(&pr)] = pr - } -} - -// ParametersFor the specified operation id -func (s *Spec) ParametersFor(operationID string) []spec.Parameter { - gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { - bag := make(map[string]spec.Parameter) - s.paramsAsMap(pi.Parameters, bag) - s.paramsAsMap(op.Parameters, bag) - - var res []spec.Parameter - for _, v := range bag { - res = append(res, v) - } - return res - } - for _, pi := range s.spec.Paths.Paths { - if pi.Get != nil && pi.Get.ID == operationID { - return gatherParams(&pi, pi.Get) - } - if pi.Head != nil && pi.Head.ID == operationID { - return gatherParams(&pi, pi.Head) - } - if pi.Options != nil && pi.Options.ID == operationID { - return gatherParams(&pi, pi.Options) - } - if pi.Post != nil && pi.Post.ID == operationID { - return gatherParams(&pi, pi.Post) - } - if pi.Patch != nil && pi.Patch.ID == operationID { - return gatherParams(&pi, pi.Patch) - } - if pi.Put != nil && pi.Put.ID == operationID { - return gatherParams(&pi, pi.Put) - } - if pi.Delete != nil && pi.Delete.ID == operationID { - return gatherParams(&pi, pi.Delete) - } - } - return nil -} - -// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. 
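Editor's note: one behavior worth calling out in the methods above is that `ConsumesFor`/`ProducesFor` fall back to the spec-level media types when the operation declares none. A minimal sketch, assuming the usual promoted fields on `spec.Swagger`; the media type is illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	doc := &spec.Swagger{}
	doc.Consumes = []string{"application/json"} // spec-level default
	a := analysis.New(doc)

	op := &spec.Operation{} // declares no consumes of its own
	fmt.Println(a.ConsumesFor(op)) // [application/json], the spec-level fallback
}
```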
-func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { - res := make(map[string]spec.Parameter) - if pi, ok := s.spec.Paths.Paths[path]; ok { - s.paramsAsMap(pi.Parameters, res) - s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res) - } - return res -} - -// OperationForName gets the operation for the given id -func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { - for method, pathItem := range s.operations { - for path, op := range pathItem { - if operationID == op.ID { - return method, path, op, true - } - } - } - return "", "", nil, false -} - -// OperationFor the given method and path -func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { - if mp, ok := s.operations[strings.ToUpper(method)]; ok { - op, fn := mp[path] - return op, fn - } - return nil, false -} - -// Operations gathers all the operations specified in the spec document -func (s *Spec) Operations() map[string]map[string]*spec.Operation { - return s.operations -} - -func (s *Spec) structMapKeys(mp map[string]struct{}) []string { - if len(mp) == 0 { - return nil - } - - result := make([]string, 0, len(mp)) - for k := range mp { - result = append(result, k) - } - return result -} - -// AllPaths returns all the paths in the swagger spec -func (s *Spec) AllPaths() map[string]spec.PathItem { - if s.spec == nil || s.spec.Paths == nil { - return nil - } - return s.spec.Paths.Paths -} - -// OperationIDs gets all the operation ids based on method an dpath -func (s *Spec) OperationIDs() []string { - if len(s.operations) == 0 { - return nil - } - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p, o := range v { - if o.ID != "" { - result = append(result, o.ID) - } else { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - } - return result -} - -// RequiredConsumes gets all the distinct consumes that are specified in the specification document -func (s *Spec) RequiredConsumes() []string { - return s.structMapKeys(s.consumes) -} - -// RequiredProduces gets all the distinct produces that are specified in the specification document -func (s *Spec) RequiredProduces() []string { - return s.structMapKeys(s.produces) -} - -// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec -func (s *Spec) RequiredSecuritySchemes() []string { - return s.structMapKeys(s.authSchemes) -} - -// SchemaRef is a reference to a schema -type SchemaRef struct { - Name string - Ref spec.Ref - Schema *spec.Schema -} - -// SchemasWithAllOf returns schema references to all schemas that are defined -// with an allOf key -func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { - for _, v := range s.allOfs { - result = append(result, v) - } - return -} - -// AllDefinitions returns schema references for all the definitions that were discovered -func (s *Spec) AllDefinitions() (result []SchemaRef) { - for _, v := range s.allSchemas { - result = append(result, v) - } - return -} - -// AllDefinitionReferences returns json refs for all the discovered schemas -func (s *Spec) AllDefinitionReferences() (result []string) { - for _, v := range s.references.schemas { - result = append(result, v.String()) - } - return -} - -// AllParameterReferences returns json refs for all the discovered parameters -func (s *Spec) AllParameterReferences() (result []string) { - for _, v := range s.references.parameters { - result = append(result, v.String()) - } - 
return -} - -// AllResponseReferences returns json refs for all the discovered responses -func (s *Spec) AllResponseReferences() (result []string) { - for _, v := range s.references.responses { - result = append(result, v.String()) - } - return -} - -// AllItemsReferences returns the references for all the items -func (s *Spec) AllItemsReferences() (result []string) { - for _, v := range s.references.items { - result = append(result, v.String()) - } - return -} - -// AllReferences returns all the references found in the document -func (s *Spec) AllReferences() (result []string) { - for _, v := range s.references.allRefs { - result = append(result, v.String()) - } - return -} - -// AllRefs returns all the unique references found in the document -func (s *Spec) AllRefs() (result []spec.Ref) { - set := make(map[string]struct{}) - for _, v := range s.references.allRefs { - a := v.String() - if a == "" { - continue - } - if _, ok := set[a]; !ok { - set[a] = struct{}{} - result = append(result, v) - } - } - return -} diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md deleted file mode 100644 index 9d5c8999..00000000 --- a/vendor/github.com/go-openapi/loads/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Loads OAI specs [![Build Status](https://ci.vmware.run/api/badges/go-openapi/loads/status.svg)](https://ci.vmware.run/go-openapi/loads) [![Coverage](https://coverage.vmware.run/badges/go-openapi/loads/coverage.svg)](https://coverage.vmware.run/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) - -Loading of OAI specification documents from local or remote locations. diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go deleted file mode 100644 index ff1ee1c9..00000000 --- a/vendor/github.com/go-openapi/loads/spec.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
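Editor's note: the accessors above turn the analyzed document into a queryable index. A sketch of dumping every unique `$ref` and every `allOf` composite; the empty spec in `main` is only there to keep the example self-contained, a real one would come from `json.Unmarshal` or the `loads` package below:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

// dumpRefs prints every unique $ref and every allOf composite the analyzer indexed.
func dumpRefs(doc *spec.Swagger) {
	a := analysis.New(doc)
	for _, ref := range a.AllRefs() { // unique, non-empty refs only
		fmt.Println("$ref:", ref.String())
	}
	for _, sch := range a.SchemasWithAllOf() {
		fmt.Println("allOf:", sch.Name)
	}
}

func main() {
	dumpRefs(&spec.Swagger{})
}
```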
- -package loads - -import ( - "encoding/json" - "fmt" - "net/url" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// JSONDoc loads a json document from either a file or a remote url -func JSONDoc(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// DocLoader represents a doc loader type -type DocLoader func(string) (json.RawMessage, error) - -// DocMatcher represents a predicate to check if a loader matches -type DocMatcher func(string) bool - -var loaders = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc} - -// AddLoader for a document -func AddLoader(predicate DocMatcher, load DocLoader) { - prev := loaders - loaders = &loader{ - Match: predicate, - Fn: load, - Next: prev, - } - -} - -type loader struct { - Fn DocLoader - Match DocMatcher - Next *loader -} - -// JSONSpec loads a spec from a json document -func JSONSpec(path string) (*Document, error) { - data, err := JSONDoc(path) - if err != nil { - return nil, err - } - // convert to json - return Analyzed(json.RawMessage(data), "") -} - -// Document represents a swagger spec document -type Document struct { - // specAnalyzer - Analyzer *analysis.Spec - spec *spec.Swagger - origSpec *spec.Swagger - schema *spec.Schema - raw json.RawMessage -} - -// Spec loads a new spec document -func Spec(path string) (*Document, error) { - specURL, err := url.Parse(path) - if err != nil { - return nil, err - } - for l := loaders.Next; l != nil; l = l.Next { - if loaders.Match(specURL.Path) { - b, err2 := loaders.Fn(path) - if err2 != nil { - return nil, err2 - } - return Analyzed(b, "") - } - } - b, err := loaders.Fn(path) - if err != nil { - return nil, err - } - return Analyzed(b, "") -} - -var swag20Schema = spec.MustLoadSwagger20Schema() - -// Analyzed creates a new analyzed spec document -func Analyzed(data json.RawMessage, version string) (*Document, error) { - if version == "" { - version = "2.0" - } - if version != "2.0" { - return nil, fmt.Errorf("spec version %q is not supported", version) - } - - swspec := new(spec.Swagger) - if err := json.Unmarshal(data, swspec); err != nil { - return nil, err - } - - origsqspec := new(spec.Swagger) - if err := json.Unmarshal(data, origsqspec); err != nil { - return nil, err - } - - d := &Document{ - Analyzer: analysis.New(swspec), - schema: swag20Schema, - spec: swspec, - raw: data, - origSpec: origsqspec, - } - return d, nil -} - -// Expanded expands the ref fields in the spec document and returns a new spec document -func (d *Document) Expanded() (*Document, error) { - swspec := new(spec.Swagger) - if err := json.Unmarshal(d.raw, swspec); err != nil { - return nil, err - } - if err := spec.ExpandSpec(swspec); err != nil { - return nil, err - } - - dd := &Document{ - Analyzer: analysis.New(swspec), - spec: swspec, - schema: swag20Schema, - raw: d.raw, - origSpec: d.origSpec, - } - return dd, nil -} - -// BasePath the base path for this spec -func (d *Document) BasePath() string { - return d.spec.BasePath -} - -// Version returns the version of this spec -func (d *Document) Version() string { - return d.spec.Swagger -} - -// Schema returns the swagger 2.0 schema -func (d *Document) Schema() *spec.Schema { - return d.schema -} - -// Spec returns the swagger spec object model -func (d *Document) Spec() *spec.Swagger { - return d.spec -} - -// Host returns the host for the API -func (d *Document) Host() string { - 
return d.spec.Host -} - -// Raw returns the raw swagger spec as json bytes -func (d *Document) Raw() json.RawMessage { - return d.raw -} - -func (d *Document) OrigSpec() *spec.Swagger { - return d.origSpec -} - -// ResetDefinitions gives a shallow copy with the models reset -func (d *Document) ResetDefinitions() *Document { - defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) - for k, v := range d.origSpec.Definitions { - defs[k] = v - } - - d.spec.Definitions = defs - return d -} - -// Pristine creates a new pristine document instance based on the input data -func (d *Document) Pristine() *Document { - dd, _ := Analyzed(d.Raw(), d.Version()) - return dd -} diff --git a/vendor/github.com/godbus/dbus/README.markdown b/vendor/github.com/godbus/dbus/README.markdown index 0a6e7e5b..d37f4e2e 100644 --- a/vendor/github.com/godbus/dbus/README.markdown +++ b/vendor/github.com/godbus/dbus/README.markdown @@ -1,3 +1,5 @@ +[![Build Status](https://travis-ci.org/godbus/dbus.svg?branch=master)](https://travis-ci.org/godbus/dbus) + dbus ---- @@ -29,6 +31,7 @@ gives a short overview over the basic usage. #### Projects using godbus - [notify](https://github.com/esiqveland/notify) provides desktop notifications over dbus into a library. +- [go-bluetooth](https://github.com/muka/go-bluetooth) provides a bluetooth client over bluez dbus API. Please note that the API is considered unstable for now and may change without further notice. diff --git a/vendor/github.com/godbus/dbus/conn.go b/vendor/github.com/godbus/dbus/conn.go index 9aa2e128..5720e2eb 100644 --- a/vendor/github.com/godbus/dbus/conn.go +++ b/vendor/github.com/godbus/dbus/conn.go @@ -9,8 +9,6 @@ import ( "sync" ) -const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket" - var ( systemBus *Conn systemBusLck sync.Mutex @@ -47,15 +45,13 @@ type Conn struct { calls map[uint32]*Call callsLck sync.RWMutex - handlers map[ObjectPath]map[string]exportedObj - handlersLck sync.RWMutex + handler Handler out chan *Message closed bool outLck sync.RWMutex - signals []chan<- *Signal - signalsLck sync.Mutex + signalHandler SignalHandler eavesdropped chan<- *Message eavesdroppedLck sync.Mutex @@ -90,16 +86,33 @@ func SessionBus() (conn *Conn, err error) { return } -// SessionBusPrivate returns a new private connection to the session bus. -func SessionBusPrivate() (*Conn, error) { +func getSessionBusAddress() (string, error) { sessionEnvLck.Lock() defer sessionEnvLck.Unlock() address := os.Getenv("DBUS_SESSION_BUS_ADDRESS") if address != "" && address != "autolaunch:" { - return Dial(address) + return address, nil + } + return getSessionBusPlatformAddress() +} + +// SessionBusPrivate returns a new private connection to the session bus. +func SessionBusPrivate() (*Conn, error) { + address, err := getSessionBusAddress() + if err != nil { + return nil, err } - return sessionBusPlatform() + return Dial(address) +} + +// SessionBusPrivate returns a new private connection to the session bus. +func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) { + address, err := getSessionBusAddress() + if err != nil { + return nil, err + } + return DialHandler(address, handler, signalHandler) } // SystemBus returns a shared connection to the system bus, connecting to it if @@ -133,11 +146,12 @@ func SystemBus() (conn *Conn, err error) { // SystemBusPrivate returns a new private connection to the system bus. 
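Editor's note: tying the two go-openapi packages together before the godbus changes: `loads` fetches and parses the document, and its exported `Analyzer` field is the `analysis.Spec` built above. A sketch; the file path is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/loads"
)

func main() {
	doc, err := loads.Spec("./swagger.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("swagger", doc.Version(), "basePath", doc.BasePath())

	expanded, err := doc.Expanded() // resolves $ref pointers into a new Document
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(expanded.Analyzer.OperationIDs())
}
```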
func SystemBusPrivate() (*Conn, error) { - address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") - if address != "" { - return Dial(address) - } - return Dial(defaultSystemBusAddress) + return Dial(getSystemBusPlatformAddress()) +} + +// SystemBusPrivateHandler returns a new private connection to the system bus, using the provided handlers. +func SystemBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) { + return DialHandler(getSystemBusPlatformAddress(), handler, signalHandler) } // Dial establishes a new private connection to the message bus specified by address. @@ -146,21 +160,36 @@ func Dial(address string) (*Conn, error) { if err != nil { return nil, err } - return newConn(tr) + return newConn(tr, NewDefaultHandler(), NewDefaultSignalHandler()) +} + +// DialHandler establishes a new private connection to the message bus specified by address, using the supplied handlers. +func DialHandler(address string, handler Handler, signalHandler SignalHandler) (*Conn, error) { + tr, err := getTransport(address) + if err != nil { + return nil, err + } + return newConn(tr, handler, signalHandler) } // NewConn creates a new private *Conn from an already established connection. func NewConn(conn io.ReadWriteCloser) (*Conn, error) { - return newConn(genericTransport{conn}) + return NewConnHandler(conn, NewDefaultHandler(), NewDefaultSignalHandler()) +} + +// NewConnHandler creates a new private *Conn from an already established connection, using the supplied handlers. +func NewConnHandler(conn io.ReadWriteCloser, handler Handler, signalHandler SignalHandler) (*Conn, error) { + return newConn(genericTransport{conn}, handler, signalHandler) } // newConn creates a new *Conn from a transport. -func newConn(tr transport) (*Conn, error) { +func newConn(tr transport, handler Handler, signalHandler SignalHandler) (*Conn, error) { conn := new(Conn) conn.transport = tr conn.calls = make(map[uint32]*Call) conn.out = make(chan *Message, 10) - conn.handlers = make(map[ObjectPath]map[string]exportedObj) + conn.handler = handler + conn.signalHandler = signalHandler conn.nextSerial = 1 conn.serialUsed = map[uint32]bool{0: true} conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") @@ -188,16 +217,21 @@ func (conn *Conn) Close() error { close(conn.out) conn.closed = true conn.outLck.Unlock() - conn.signalsLck.Lock() - for _, ch := range conn.signals { - close(ch) + + if term, ok := conn.signalHandler.(Terminator); ok { + term.Terminate() } - conn.signalsLck.Unlock() + + if term, ok := conn.handler.(Terminator); ok { + term.Terminate() + } + conn.eavesdroppedLck.Lock() if conn.eavesdropped != nil { close(conn.eavesdropped) } conn.eavesdroppedLck.Unlock() + return conn.transport.Close() } @@ -334,17 +368,7 @@ func (conn *Conn) inWorker() { conn.namesLck.Unlock() } } - signal := &Signal{ - Sender: sender, - Path: msg.Headers[FieldPath].value.(ObjectPath), - Name: iface + "." + member, - Body: msg.Body, - } - conn.signalsLck.Lock() - for _, ch := range conn.signals { - ch <- signal - } - conn.signalsLck.Unlock() + conn.handleSignal(msg) case TypeMethodCall: go conn.handleCall(msg) } @@ -365,6 +389,21 @@ func (conn *Conn) inWorker() { } } +func (conn *Conn) handleSignal(msg *Message) { + iface := msg.Headers[FieldInterface].value.(string) + member := msg.Headers[FieldMember].value.(string) + // as per http://dbus.freedesktop.org/doc/dbus-specification.html , + // sender is optional for signals. 
+ sender, _ := msg.Headers[FieldSender].value.(string) + signal := &Signal{ + Sender: sender, + Path: msg.Headers[FieldPath].value.(ObjectPath), + Name: iface + "." + member, + Body: msg.Body, + } + conn.signalHandler.DeliverSignal(iface, member, signal) +} + // Names returns the list of all names that are currently owned by this // connection. The slice is always at least one element long, the first element // being the unique name of the connection. @@ -455,7 +494,19 @@ func (conn *Conn) Send(msg *Message, ch chan *Call) *Call { // sendError creates an error message corresponding to the parameters and sends // it to conn.out. -func (conn *Conn) sendError(e Error, dest string, serial uint32) { +func (conn *Conn) sendError(err error, dest string, serial uint32) { + var e *Error + switch em := err.(type) { + case Error: + e = &em + case *Error: + e = em + case DBusError: + name, body := em.DBusError() + e = NewError(name, body) + default: + e = MakeFailedError(err) + } msg := new(Message) msg.Type = TypeError msg.serial = conn.getSerial() @@ -498,6 +549,14 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { conn.outLck.RUnlock() } +func (conn *Conn) defaultSignalAction(fn func(h *defaultSignalHandler, ch chan<- *Signal), ch chan<- *Signal) { + if !isDefaultSignalHandler(conn.signalHandler) { + return + } + handler := conn.signalHandler.(*defaultSignalHandler) + fn(handler, ch) +} + // Signal registers the given channel to be passed all received signal messages. // The caller has to make sure that ch is sufficiently buffered; if a message // arrives when a write to c is not possible, it is discarded. @@ -508,22 +567,12 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { // channel for eavesdropped messages, this channel receives all signals, and // none of the channels passed to Signal will receive any signals. func (conn *Conn) Signal(ch chan<- *Signal) { - conn.signalsLck.Lock() - conn.signals = append(conn.signals, ch) - conn.signalsLck.Unlock() + conn.defaultSignalAction((*defaultSignalHandler).addSignal, ch) } // RemoveSignal removes the given channel from the list of the registered channels. 
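Editor's note: the refactor above routes signals through a pluggable `SignalHandler`, but the channel-based API keeps working when the default handler is installed. Typical consumer code, as a sketch; note the doc comment's requirement that the channel be buffered:

```go
package main

import (
	"fmt"
	"log"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SystemBus()
	if err != nil {
		log.Fatal(err)
	}
	ch := make(chan *dbus.Signal, 10) // must be buffered; undeliverable signals are discarded
	conn.Signal(ch)
	defer conn.RemoveSignal(ch)

	for sig := range ch {
		fmt.Println(sig.Name, sig.Path, sig.Body)
	}
}
```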
func (conn *Conn) RemoveSignal(ch chan<- *Signal) { - conn.signalsLck.Lock() - for i := len(conn.signals) - 1; i >= 0; i-- { - if ch == conn.signals[i] { - copy(conn.signals[i:], conn.signals[i+1:]) - conn.signals[len(conn.signals)-1] = nil - conn.signals = conn.signals[:len(conn.signals)-1] - } - } - conn.signalsLck.Unlock() + conn.defaultSignalAction((*defaultSignalHandler).removeSignal, ch) } // SupportsUnixFDs returns whether the underlying transport supports passing of diff --git a/vendor/github.com/godbus/dbus/conn_darwin.go b/vendor/github.com/godbus/dbus/conn_darwin.go index b67bb1b8..c015f80c 100644 --- a/vendor/github.com/godbus/dbus/conn_darwin.go +++ b/vendor/github.com/godbus/dbus/conn_darwin.go @@ -2,20 +2,32 @@ package dbus import ( "errors" + "fmt" + "os" "os/exec" ) -func sessionBusPlatform() (*Conn, error) { +const defaultSystemBusAddress = "unix:path=/opt/local/var/run/dbus/system_bus_socket" + +func getSessionBusPlatformAddress() (string, error) { cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET") b, err := cmd.CombinedOutput() if err != nil { - return nil, err + return "", err } if len(b) == 0 { - return nil, errors.New("dbus: couldn't determine address of session bus") + return "", errors.New("dbus: couldn't determine address of session bus") } - return Dial("unix:path=" + string(b[:len(b)-1])) + return "unix:path=" + string(b[:len(b)-1]), nil +} + +func getSystemBusPlatformAddress() string { + address := os.Getenv("DBUS_LAUNCHD_SESSION_BUS_SOCKET") + if address != "" { + return fmt.Sprintf("unix:path=%s", address) + } + return defaultSystemBusAddress } diff --git a/vendor/github.com/godbus/dbus/conn_other.go b/vendor/github.com/godbus/dbus/conn_other.go index 289e8c5d..254c9f2e 100644 --- a/vendor/github.com/godbus/dbus/conn_other.go +++ b/vendor/github.com/godbus/dbus/conn_other.go @@ -5,27 +5,38 @@ package dbus import ( "bytes" "errors" + "fmt" "os" "os/exec" ) -func sessionBusPlatform() (*Conn, error) { +const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket" + +func getSessionBusPlatformAddress() (string, error) { cmd := exec.Command("dbus-launch") b, err := cmd.CombinedOutput() if err != nil { - return nil, err + return "", err } i := bytes.IndexByte(b, '=') j := bytes.IndexByte(b, '\n') if i == -1 || j == -1 { - return nil, errors.New("dbus: couldn't determine address of session bus") + return "", errors.New("dbus: couldn't determine address of session bus") } env, addr := string(b[0:i]), string(b[i+1:j]) os.Setenv(env, addr) - return Dial(addr) + return addr, nil +} + +func getSystemBusPlatformAddress() string { + address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") + if address != "" { + return fmt.Sprintf("unix:path=%s", address) + } + return defaultSystemBusAddress } diff --git a/vendor/github.com/godbus/dbus/dbus.go b/vendor/github.com/godbus/dbus/dbus.go index 2ce68735..c6d0d3ce 100644 --- a/vendor/github.com/godbus/dbus/dbus.go +++ b/vendor/github.com/godbus/dbus/dbus.go @@ -2,6 +2,7 @@ package dbus import ( "errors" + "fmt" "reflect" "strings" ) @@ -12,6 +13,8 @@ var ( uint8Type = reflect.TypeOf(uint8(0)) int16Type = reflect.TypeOf(int16(0)) uint16Type = reflect.TypeOf(uint16(0)) + intType = reflect.TypeOf(int(0)) + uintType = reflect.TypeOf(uint(0)) int32Type = reflect.TypeOf(int32(0)) uint32Type = reflect.TypeOf(uint32(0)) int64Type = reflect.TypeOf(int64(0)) @@ -22,6 +25,7 @@ var ( objectPathType = reflect.TypeOf(ObjectPath("")) variantType = reflect.TypeOf(Variant{Signature{""}, nil}) interfacesType = 
reflect.TypeOf([]interface{}{}) + interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() unixFDType = reflect.TypeOf(UnixFD(0)) unixFDIndexType = reflect.TypeOf(UnixFDIndex(0)) ) @@ -46,86 +50,251 @@ func Store(src []interface{}, dest ...interface{}) error { } for i := range src { - if err := store(src[i], dest[i]); err != nil { + if err := storeInterfaces(src[i], dest[i]); err != nil { return err } } return nil } -func store(src, dest interface{}) error { - if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) { - reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src)) - return nil - } else if hasStruct(dest) { - rv := reflect.ValueOf(dest).Elem() - switch rv.Kind() { - case reflect.Struct: - vs, ok := src.([]interface{}) - if !ok { - return errors.New("dbus.Store: type mismatch") - } - t := rv.Type() - ndest := make([]interface{}, 0, rv.NumField()) - for i := 0; i < rv.NumField(); i++ { - field := t.Field(i) - if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { - ndest = append(ndest, rv.Field(i).Addr().Interface()) - } - } - if len(vs) != len(ndest) { - return errors.New("dbus.Store: type mismatch") - } - err := Store(vs, ndest...) - if err != nil { - return errors.New("dbus.Store: type mismatch") - } - case reflect.Slice: - sv := reflect.ValueOf(src) - if sv.Kind() != reflect.Slice { - return errors.New("dbus.Store: type mismatch") - } - rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len())) - for i := 0; i < sv.Len(); i++ { - if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil { - return err - } - } - case reflect.Map: - sv := reflect.ValueOf(src) - if sv.Kind() != reflect.Map { - return errors.New("dbus.Store: type mismatch") - } - keys := sv.MapKeys() - rv.Set(reflect.MakeMap(sv.Type())) - for _, key := range keys { - v := reflect.New(sv.Type().Elem()) - if err := store(v, sv.MapIndex(key).Interface()); err != nil { - return err - } - rv.SetMapIndex(key, v.Elem()) - } - default: - return errors.New("dbus.Store: type mismatch") - } - return nil - } else { - return errors.New("dbus.Store: type mismatch") +func storeInterfaces(src, dest interface{}) error { + return store(reflect.ValueOf(dest), reflect.ValueOf(src)) +} + +func store(dest, src reflect.Value) error { + if dest.Kind() == reflect.Ptr { + return store(dest.Elem(), src) + } + switch src.Kind() { + case reflect.Slice: + return storeSlice(dest, src) + case reflect.Map: + return storeMap(dest, src) + default: + return storeBase(dest, src) } } -func hasStruct(v interface{}) bool { - t := reflect.TypeOf(v) - for { - switch t.Kind() { - case reflect.Struct: - return true - case reflect.Slice, reflect.Ptr, reflect.Map: - t = t.Elem() - default: - return false +func storeBase(dest, src reflect.Value) error { + return setDest(dest, src) +} + +func setDest(dest, src reflect.Value) error { + if !isVariant(src.Type()) && isVariant(dest.Type()) { + //special conversion for dbus.Variant + dest.Set(reflect.ValueOf(MakeVariant(src.Interface()))) + return nil + } + if isVariant(src.Type()) && !isVariant(dest.Type()) { + src = getVariantValue(src) + } + if !src.Type().ConvertibleTo(dest.Type()) { + return fmt.Errorf( + "dbus.Store: type mismatch: cannot convert %s to %s", + src.Type(), dest.Type()) + } + dest.Set(src.Convert(dest.Type())) + return nil +} + +func kindsAreCompatible(dest, src reflect.Type) bool { + switch { + case isVariant(dest): + return true + case dest.Kind() == reflect.Interface: + return true + default: + return dest.Kind() == src.Kind() + } +} + +func isConvertibleTo(dest, src 
reflect.Type) bool { + switch { + case isVariant(dest): + return true + case dest.Kind() == reflect.Interface: + return true + case dest.Kind() == reflect.Slice: + return src.Kind() == reflect.Slice && + isConvertibleTo(dest.Elem(), src.Elem()) + case dest.Kind() == reflect.Struct: + return src == interfacesType + default: + return src.ConvertibleTo(dest) + } +} + +func storeMap(dest, src reflect.Value) error { + switch { + case !kindsAreCompatible(dest.Type(), src.Type()): + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "map: cannot store a value of %s into %s", + src.Type(), dest.Type()) + case isVariant(dest.Type()): + return storeMapIntoVariant(dest, src) + case dest.Kind() == reflect.Interface: + return storeMapIntoInterface(dest, src) + case isConvertibleTo(dest.Type().Key(), src.Type().Key()) && + isConvertibleTo(dest.Type().Elem(), src.Type().Elem()): + return storeMapIntoMap(dest, src) + default: + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "map: cannot convert a value of %s into %s", + src.Type(), dest.Type()) + } +} + +func storeMapIntoVariant(dest, src reflect.Value) error { + dv := reflect.MakeMap(src.Type()) + err := store(dv, src) + if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeMapIntoInterface(dest, src reflect.Value) error { + var dv reflect.Value + if isVariant(src.Type().Elem()) { + //Convert variants to interface{} recursively when converting + //to interface{} + dv = reflect.MakeMap( + reflect.MapOf(src.Type().Key(), interfaceType)) + } else { + dv = reflect.MakeMap(src.Type()) + } + err := store(dv, src) + if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeMapIntoMap(dest, src reflect.Value) error { + if dest.IsNil() { + dest.Set(reflect.MakeMap(dest.Type())) + } + keys := src.MapKeys() + for _, key := range keys { + dkey := key.Convert(dest.Type().Key()) + dval := reflect.New(dest.Type().Elem()).Elem() + err := store(dval, getVariantValue(src.MapIndex(key))) + if err != nil { + return err + } + dest.SetMapIndex(dkey, dval) + } + return nil +} + +func storeSlice(dest, src reflect.Value) error { + switch { + case src.Type() == interfacesType && dest.Kind() == reflect.Struct: + //The decoder always decodes structs as slices of interface{} + return storeStruct(dest, src) + case !kindsAreCompatible(dest.Type(), src.Type()): + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "slice: cannot store a value of %s into %s", + src.Type(), dest.Type()) + case isVariant(dest.Type()): + return storeSliceIntoVariant(dest, src) + case dest.Kind() == reflect.Interface: + return storeSliceIntoInterface(dest, src) + case isConvertibleTo(dest.Type().Elem(), src.Type().Elem()): + return storeSliceIntoSlice(dest, src) + default: + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "slice: cannot convert a value of %s into %s", + src.Type(), dest.Type()) + } +} + +func storeStruct(dest, src reflect.Value) error { + if isVariant(dest.Type()) { + return storeBase(dest, src) + } + dval := make([]interface{}, 0, dest.NumField()) + dtype := dest.Type() + for i := 0; i < dest.NumField(); i++ { + field := dest.Field(i) + ftype := dtype.Field(i) + if ftype.PkgPath != "" { + continue + } + if ftype.Tag.Get("dbus") == "-" { + continue + } + dval = append(dval, field.Addr().Interface()) + } + if src.Len() != len(dval) { + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "destination struct does not have "+ + "enough fields need: %d have: %d", + src.Len(), len(dval)) + } + return 
Store(src.Interface().([]interface{}), dval...) +} + +func storeSliceIntoVariant(dest, src reflect.Value) error { + dv := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + err := store(dv, src) + if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeSliceIntoInterface(dest, src reflect.Value) error { + var dv reflect.Value + if isVariant(src.Type().Elem()) { + //Convert variants to interface{} recursively when converting + //to interface{} + dv = reflect.MakeSlice(reflect.SliceOf(interfaceType), + src.Len(), src.Cap()) + } else { + dv = reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + } + err := store(dv, src) + if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeSliceIntoSlice(dest, src reflect.Value) error { + if dest.IsNil() || dest.Len() < src.Len() { + dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap())) + } + if dest.Len() != src.Len() { + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "slices are different lengths "+ + "need: %d have: %d", + src.Len(), dest.Len()) + } + for i := 0; i < src.Len(); i++ { + err := store(dest.Index(i), getVariantValue(src.Index(i))) + if err != nil { + return err } } + return nil +} + +func getVariantValue(in reflect.Value) reflect.Value { + if isVariant(in.Type()) { + return reflect.ValueOf(in.Interface().(Variant).Value()) + } + return in +} + +func isVariant(t reflect.Type) bool { + return t == variantType } // An ObjectPath is an object path as defined by the D-Bus spec. @@ -177,15 +346,15 @@ func alignment(t reflect.Type) int { return 4 case signatureType: return 1 - case interfacesType: // sometimes used for structs - return 8 + case interfacesType: + return 4 } switch t.Kind() { case reflect.Uint8: return 1 case reflect.Uint16, reflect.Int16: return 2 - case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map: + case reflect.Uint, reflect.Int, reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map: return 4 case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct: return 8 @@ -200,7 +369,7 @@ func isKeyType(t reflect.Type) bool { switch t.Kind() { case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64, - reflect.String: + reflect.String, reflect.Uint, reflect.Int: return true } diff --git a/vendor/github.com/godbus/dbus/default_handler.go b/vendor/github.com/godbus/dbus/default_handler.go new file mode 100644 index 00000000..e81f73ac --- /dev/null +++ b/vendor/github.com/godbus/dbus/default_handler.go @@ -0,0 +1,291 @@ +package dbus + +import ( + "bytes" + "reflect" + "strings" + "sync" +) + +func newIntrospectIntf(h *defaultHandler) *exportedIntf { + methods := make(map[string]Method) + methods["Introspect"] = exportedMethod{ + reflect.ValueOf(func(msg Message) (string, *Error) { + path := msg.Headers[FieldPath].value.(ObjectPath) + return h.introspectPath(path), nil + }), + } + return newExportedIntf(methods, true) +} + +//NewDefaultHandler returns an instance of the default +//call handler. This is useful if you want to implement only +//one of the two handlers but not both. 
+func NewDefaultHandler() *defaultHandler { + h := &defaultHandler{ + objects: make(map[ObjectPath]*exportedObj), + defaultIntf: make(map[string]*exportedIntf), + } + h.defaultIntf["org.freedesktop.DBus.Introspectable"] = newIntrospectIntf(h) + return h +} + +type defaultHandler struct { + sync.RWMutex + objects map[ObjectPath]*exportedObj + defaultIntf map[string]*exportedIntf +} + +func (h *defaultHandler) PathExists(path ObjectPath) bool { + _, ok := h.objects[path] + return ok +} + +func (h *defaultHandler) introspectPath(path ObjectPath) string { + subpath := make(map[string]struct{}) + var xml bytes.Buffer + xml.WriteString("<node>") + for obj, _ := range h.objects { + p := string(path) + if p != "/" { + p += "/" + } + if strings.HasPrefix(string(obj), p) { + node_name := strings.Split(string(obj[len(p):]), "/")[0] + subpath[node_name] = struct{}{} + } + } + for s, _ := range subpath { + xml.WriteString("\n\t<node name=\"" + s + "\"/>") + } + xml.WriteString("\n</node>") + return xml.String() +} + +func (h *defaultHandler) LookupObject(path ObjectPath) (ServerObject, bool) { + h.RLock() + defer h.RUnlock() + object, ok := h.objects[path] + if ok { + return object, ok + } + + // If an object wasn't found for this exact path, + // look for a matching subtree registration + subtreeObject := newExportedObject() + path = path[:strings.LastIndex(string(path), "/")] + for len(path) > 0 { + object, ok = h.objects[path] + if ok { + for name, iface := range object.interfaces { + // Only include this handler if it registered for the subtree + if iface.isFallbackInterface() { + subtreeObject.interfaces[name] = iface + } + } + break + } + + path = path[:strings.LastIndex(string(path), "/")] + } + + for name, intf := range h.defaultIntf { + if _, exists := subtreeObject.interfaces[name]; exists { + continue + } + subtreeObject.interfaces[name] = intf + } + + return subtreeObject, true +} + +func (h *defaultHandler) AddObject(path ObjectPath, object *exportedObj) { + h.Lock() + h.objects[path] = object + h.Unlock() +} + +func (h *defaultHandler) DeleteObject(path ObjectPath) { + h.Lock() + delete(h.objects, path) + h.Unlock() +} + +type exportedMethod struct { + reflect.Value +} + +func (m exportedMethod) Call(args ...interface{}) ([]interface{}, error) { + t := m.Type() + + params := make([]reflect.Value, len(args)) + for i := 0; i < len(args); i++ { + params[i] = reflect.ValueOf(args[i]).Elem() + } + + ret := m.Value.Call(params) + + err := ret[t.NumOut()-1].Interface().(*Error) + ret = ret[:t.NumOut()-1] + out := make([]interface{}, len(ret)) + for i, val := range ret { + out[i] = val.Interface() + } + if err == nil { + //concrete type to interface nil is a special case + return out, nil + } + return out, err +} + +func (m exportedMethod) NumArguments() int { + return m.Value.Type().NumIn() +} + +func (m exportedMethod) ArgumentValue(i int) interface{} { + return reflect.Zero(m.Type().In(i)).Interface() +} + +func (m exportedMethod) NumReturns() int { + return m.Value.Type().NumOut() +} + +func (m exportedMethod) ReturnValue(i int) interface{} { + return reflect.Zero(m.Type().Out(i)).Interface() +} + +func newExportedObject() *exportedObj { + return &exportedObj{ + interfaces: make(map[string]*exportedIntf), + } +} + +type exportedObj struct { + interfaces map[string]*exportedIntf +} + +func (obj *exportedObj) LookupInterface(name string) (Interface, bool) { + if name == "" { + return obj, true + } + intf, exists := obj.interfaces[name] + return intf, exists +} + +func (obj *exportedObj) AddInterface(name string, iface *exportedIntf)
{ + obj.interfaces[name] = iface +} + +func (obj *exportedObj) DeleteInterface(name string) { + delete(obj.interfaces, name) +} + +func (obj *exportedObj) LookupMethod(name string) (Method, bool) { + for _, intf := range obj.interfaces { + method, exists := intf.LookupMethod(name) + if exists { + return method, exists + } + } + return nil, false +} + +func (obj *exportedObj) isFallbackInterface() bool { + return false +} + +func newExportedIntf(methods map[string]Method, includeSubtree bool) *exportedIntf { + return &exportedIntf{ + methods: methods, + includeSubtree: includeSubtree, + } +} + +type exportedIntf struct { + methods map[string]Method + + // Whether or not this export is for the entire subtree + includeSubtree bool +} + +func (obj *exportedIntf) LookupMethod(name string) (Method, bool) { + out, exists := obj.methods[name] + return out, exists +} + +func (obj *exportedIntf) isFallbackInterface() bool { + return obj.includeSubtree +} + +//NewDefaultSignalHandler returns an instance of the default +//signal handler. This is useful if you want to implement only +//one of the two handlers but not both. +func NewDefaultSignalHandler() *defaultSignalHandler { + return &defaultSignalHandler{} +} + +func isDefaultSignalHandler(handler SignalHandler) bool { + _, ok := handler.(*defaultSignalHandler) + return ok +} + +type defaultSignalHandler struct { + sync.RWMutex + closed bool + signals []chan<- *Signal +} + +func (sh *defaultSignalHandler) DeliverSignal(intf, name string, signal *Signal) { + go func() { + sh.RLock() + defer sh.RUnlock() + if sh.closed { + return + } + for _, ch := range sh.signals { + ch <- signal + } + }() +} + +func (sh *defaultSignalHandler) Init() error { + sh.Lock() + sh.signals = make([]chan<- *Signal, 0) + sh.Unlock() + return nil +} + +func (sh *defaultSignalHandler) Terminate() { + sh.Lock() + sh.closed = true + for _, ch := range sh.signals { + close(ch) + } + sh.signals = nil + sh.Unlock() +} + +func (sh *defaultSignalHandler) addSignal(ch chan<- *Signal) { + sh.Lock() + defer sh.Unlock() + if sh.closed { + return + } + sh.signals = append(sh.signals, ch) + +} + +func (sh *defaultSignalHandler) removeSignal(ch chan<- *Signal) { + sh.Lock() + defer sh.Unlock() + if sh.closed { + return + } + for i := len(sh.signals) - 1; i >= 0; i-- { + if ch == sh.signals[i] { + copy(sh.signals[i:], sh.signals[i+1:]) + sh.signals[len(sh.signals)-1] = nil + sh.signals = sh.signals[:len(sh.signals)-1] + } + } +} diff --git a/vendor/github.com/godbus/dbus/doc.go b/vendor/github.com/godbus/dbus/doc.go index deff554a..895036a8 100644 --- a/vendor/github.com/godbus/dbus/doc.go +++ b/vendor/github.com/godbus/dbus/doc.go @@ -19,6 +19,8 @@ respective D-Bus equivalents: bool | BOOLEAN int16 | INT16 uint16 | UINT16 + int | INT32 + uint | UINT32 int32 | INT32 uint32 | UINT32 int64 | INT64 @@ -28,6 +30,7 @@ respective D-Bus equivalents: ObjectPath | OBJECT_PATH Signature | SIGNATURE Variant | VARIANT + interface{} | VARIANT UnixFDIndex | UNIX_FD Slices and arrays encode as ARRAYs of their element type. @@ -41,6 +44,9 @@ be skipped. Pointers encode as the value they're pointed to. +Types convertible to one of the base types above will be mapped as the +base type. + Trying to encode any other type or a slice, map or struct containing an unsupported type will result in an InvalidTypeError. 
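The Store refactor above unwraps variants and converts compatible kinds when filling a destination, and the doc.go table now maps int, uint, and interface{} onto wire types. A minimal sketch of what that buys callers, assuming the refactored Store in this hunk (the `version` struct and its values are illustrative only, not part of the library):

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// A D-Bus STRUCT arrives from the decoder as []interface{}. With the
	// refactored Store, each field is converted individually, and a VARIANT
	// is unwrapped into the concrete destination type by setDest.
	type version struct { // illustrative type
		Name  string
		Major uint32
	}
	body := []interface{}{
		[]interface{}{"crio", dbus.MakeVariant(uint32(1))},
	}
	var v version
	if err := dbus.Store(body, &v); err != nil {
		fmt.Println("store failed:", err)
		return
	}
	fmt.Printf("%+v\n", v) // prints {Name:crio Major:1}
}
```

Before this change, storing a Variant into a plain uint32 field was a type mismatch; the new setDest path unwraps the Variant and converts, which is what makes the sketch above work.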
diff --git a/vendor/github.com/godbus/dbus/encoder.go b/vendor/github.com/godbus/dbus/encoder.go index 9f0a9e89..8bb71776 100644 --- a/vendor/github.com/godbus/dbus/encoder.go +++ b/vendor/github.com/godbus/dbus/encoder.go @@ -96,10 +96,10 @@ func (enc *encoder) encode(v reflect.Value, depth int) { case reflect.Uint16: enc.binwrite(uint16(v.Uint())) enc.pos += 2 - case reflect.Int32: + case reflect.Int, reflect.Int32: enc.binwrite(int32(v.Int())) enc.pos += 4 - case reflect.Uint32: + case reflect.Uint, reflect.Uint32: enc.binwrite(uint32(v.Uint())) enc.pos += 4 case reflect.Int64: @@ -202,6 +202,8 @@ func (enc *encoder) encode(v reflect.Value, depth int) { panic(err) } enc.pos += length + case reflect.Interface: + enc.encode(reflect.ValueOf(MakeVariant(v.Interface())), depth) default: panic(InvalidTypeError{v.Type()}) } diff --git a/vendor/github.com/godbus/dbus/export.go b/vendor/github.com/godbus/dbus/export.go index 6c335220..aae97088 100644 --- a/vendor/github.com/godbus/dbus/export.go +++ b/vendor/github.com/godbus/dbus/export.go @@ -1,7 +1,6 @@ package dbus import ( - "bytes" "errors" "fmt" "reflect" @@ -9,32 +8,29 @@ import ( ) var ( - errmsgInvalidArg = Error{ + ErrMsgInvalidArg = Error{ "org.freedesktop.DBus.Error.InvalidArgs", []interface{}{"Invalid type / number of args"}, } - errmsgNoObject = Error{ + ErrMsgNoObject = Error{ "org.freedesktop.DBus.Error.NoSuchObject", []interface{}{"No such object"}, } - errmsgUnknownMethod = Error{ + ErrMsgUnknownMethod = Error{ "org.freedesktop.DBus.Error.UnknownMethod", []interface{}{"Unknown / invalid method"}, } + ErrMsgUnknownInterface = Error{ + "org.freedesktop.DBus.Error.UnknownInterface", + []interface{}{"Object does not implement the interface"}, + } ) -// exportedObj represents an exported object. It stores a precomputed -// method table that represents the methods exported on the bus. -type exportedObj struct { - methods map[string]reflect.Value - - // Whether or not this export is for the entire subtree - includeSubtree bool -} - -func (obj exportedObj) Method(name string) (reflect.Value, bool) { - out, exists := obj.methods[name] - return out, exists +func MakeFailedError(err error) *Error { + return &Error{ + "org.freedesktop.DBus.Error.Failed", + []interface{}{err.Error()}, + } } // Sender is a type which can be used in exported methods to receive the message @@ -63,7 +59,7 @@ func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Va // only track valid methods must return *Error as last arg // and must be exported if t.NumOut() == 0 || - t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) || + t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) || methtype.PkgPath != "" { continue } @@ -73,119 +69,12 @@ func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Va return methods } -// searchHandlers will look through all registered handlers looking for one -// to handle the given path. If a verbatim one isn't found, it will check for -// a subtree registration for the path as well. 
-func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportedObj, bool) { - conn.handlersLck.RLock() - defer conn.handlersLck.RUnlock() +func standardMethodArgumentDecode(m Method, sender string, msg *Message, body []interface{}) ([]interface{}, error) { + pointers := make([]interface{}, m.NumArguments()) + decode := make([]interface{}, 0, len(body)) - handlers, ok := conn.handlers[path] - if ok { - return handlers, ok - } - - // If handlers weren't found for this exact path, look for a matching subtree - // registration - handlers = make(map[string]exportedObj) - path = path[:strings.LastIndex(string(path), "/")] - for len(path) > 0 { - var subtreeHandlers map[string]exportedObj - subtreeHandlers, ok = conn.handlers[path] - if ok { - for iface, handler := range subtreeHandlers { - // Only include this handler if it registered for the subtree - if handler.includeSubtree { - handlers[iface] = handler - } - } - - break - } - - path = path[:strings.LastIndex(string(path), "/")] - } - - return handlers, ok -} - -// handleCall handles the given method call (i.e. looks if it's one of the -// pre-implemented ones and searches for a corresponding handler if not). -func (conn *Conn) handleCall(msg *Message) { - name := msg.Headers[FieldMember].value.(string) - path := msg.Headers[FieldPath].value.(ObjectPath) - ifaceName, hasIface := msg.Headers[FieldInterface].value.(string) - sender, hasSender := msg.Headers[FieldSender].value.(string) - serial := msg.serial - if ifaceName == "org.freedesktop.DBus.Peer" { - switch name { - case "Ping": - conn.sendReply(sender, serial) - case "GetMachineId": - conn.sendReply(sender, serial, conn.uuid) - default: - conn.sendError(errmsgUnknownMethod, sender, serial) - } - return - } else if ifaceName == "org.freedesktop.DBus.Introspectable" && name == "Introspect" { - if _, ok := conn.handlers[path]; !ok { - subpath := make(map[string]struct{}) - var xml bytes.Buffer - xml.WriteString("<node>") - for h, _ := range conn.handlers { - p := string(path) - if p != "/" { - p += "/" - } - if strings.HasPrefix(string(h), p) { - node_name := strings.Split(string(h[len(p):]), "/")[0] - subpath[node_name] = struct{}{} - } - } - for s, _ := range subpath { - xml.WriteString("\n\t<node name=\"" + s + "\"/>") - } - xml.WriteString("\n</node>") - conn.sendReply(sender, serial, xml.String()) - return - } - } - if len(name) == 0 { - conn.sendError(errmsgUnknownMethod, sender, serial) - } - - // Find the exported handler (if any) for this path - handlers, ok := conn.searchHandlers(path) - if !ok { - conn.sendError(errmsgNoObject, sender, serial) - return - } - - var m reflect.Value - var exists bool - if hasIface { - iface := handlers[ifaceName] - m, exists = iface.Method(name) - } else { - for _, v := range handlers { - m, exists = v.Method(name) - if exists { - break - } - } - } - - if !exists { - conn.sendError(errmsgUnknownMethod, sender, serial) - return - } - - t := m.Type() - vs := msg.Body - pointers := make([]interface{}, t.NumIn()) - decode := make([]interface{}, 0, len(vs)) - for i := 0; i < t.NumIn(); i++ { - tp := t.In(i) + for i := 0; i < m.NumArguments(); i++ { + tp := reflect.TypeOf(m.ArgumentValue(i)) val := reflect.New(tp) pointers[i] = val.Interface() if tp == reflect.TypeOf((*Sender)(nil)).Elem() { @@ -197,26 +86,73 @@ func (conn *Conn) handleCall(msg *Message) { } } - if len(decode) != len(vs) { - conn.sendError(errmsgInvalidArg, sender, serial) + if len(decode) != len(body) { + return nil, ErrMsgInvalidArg + } + + if err := Store(body, decode...); err != nil { + return nil,
ErrMsgInvalidArg + } + + return pointers, nil +} + +func (conn *Conn) decodeArguments(m Method, sender string, msg *Message) ([]interface{}, error) { + if decoder, ok := m.(ArgumentDecoder); ok { + return decoder.DecodeArguments(conn, sender, msg, msg.Body) + } + return standardMethodArgumentDecode(m, sender, msg, msg.Body) +} + +// handleCall handles the given method call (i.e. looks if it's one of the +// pre-implemented ones and searches for a corresponding handler if not). +func (conn *Conn) handleCall(msg *Message) { + name := msg.Headers[FieldMember].value.(string) + path := msg.Headers[FieldPath].value.(ObjectPath) + ifaceName, _ := msg.Headers[FieldInterface].value.(string) + sender, hasSender := msg.Headers[FieldSender].value.(string) + serial := msg.serial + if ifaceName == "org.freedesktop.DBus.Peer" { + switch name { + case "Ping": + conn.sendReply(sender, serial) + case "GetMachineId": + conn.sendReply(sender, serial, conn.uuid) + default: + conn.sendError(ErrMsgUnknownMethod, sender, serial) + } + return + } + if len(name) == 0 { + conn.sendError(ErrMsgUnknownMethod, sender, serial) + } + + object, ok := conn.handler.LookupObject(path) + if !ok { + conn.sendError(ErrMsgNoObject, sender, serial) return } - if err := Store(vs, decode...); err != nil { - conn.sendError(errmsgInvalidArg, sender, serial) + iface, exists := object.LookupInterface(ifaceName) + if !exists { + conn.sendError(ErrMsgUnknownInterface, sender, serial) return } - // Extract parameters - params := make([]reflect.Value, len(pointers)) - for i := 0; i < len(pointers); i++ { - params[i] = reflect.ValueOf(pointers[i]).Elem() + m, exists := iface.LookupMethod(name) + if !exists { + conn.sendError(ErrMsgUnknownMethod, sender, serial) + return + } + args, err := conn.decodeArguments(m, sender, msg) + if err != nil { + conn.sendError(err, sender, serial) + return } - // Call method - ret := m.Call(params) - if em := ret[t.NumOut()-1].Interface().(*Error); em != nil { - conn.sendError(*em, sender, serial) + ret, err := m.Call(args...) 
+ if err != nil { + conn.sendError(err, sender, serial) return } @@ -229,13 +165,11 @@ func (conn *Conn) handleCall(msg *Message) { reply.Headers[FieldDestination] = msg.Headers[FieldSender] } reply.Headers[FieldReplySerial] = MakeVariant(msg.serial) - reply.Body = make([]interface{}, len(ret)-1) - for i := 0; i < len(ret)-1; i++ { - reply.Body[i] = ret[i].Interface() - } - if len(ret) != 1 { - reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) + reply.Body = make([]interface{}, len(ret)) + for i := 0; i < len(ret); i++ { + reply.Body[i] = ret[i] } + reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) conn.outLck.RLock() if !conn.closed { conn.out <- reply @@ -375,7 +309,7 @@ func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectP t := rval.Type() // only track valid methods must return *Error as last arg if t.NumOut() == 0 || - t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) { + t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) { continue } out[name] = rval @@ -383,38 +317,49 @@ func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectP return conn.export(out, path, iface, includeSubtree) } +func (conn *Conn) unexport(h *defaultHandler, path ObjectPath, iface string) error { + if h.PathExists(path) { + obj := h.objects[path] + obj.DeleteInterface(iface) + if len(obj.interfaces) == 0 { + h.DeleteObject(path) + } + } + return nil +} + // exportWithMap is the worker function for all exports/registrations. func (conn *Conn) export(methods map[string]reflect.Value, path ObjectPath, iface string, includeSubtree bool) error { + h, ok := conn.handler.(*defaultHandler) + if !ok { + return fmt.Errorf( + `dbus: export only allowed on the default handler, have %T`, + conn.handler) + } + if !path.IsValid() { + return fmt.Errorf(`dbus: Invalid path name: "%s"`, path) } - conn.handlersLck.Lock() - defer conn.handlersLck.Unlock() - // Remove a previous export if the interface is nil if methods == nil { - if _, ok := conn.handlers[path]; ok { - delete(conn.handlers[path], iface) - if len(conn.handlers[path]) == 0 { - delete(conn.handlers, path) - } - } - - return nil + return conn.unexport(h, path, iface) } // If this is the first handler for this path, make a new map to hold all // handlers for this path. - if _, ok := conn.handlers[path]; !ok { - conn.handlers[path] = make(map[string]exportedObj) + if !h.PathExists(path) { + h.AddObject(path, newExportedObject()) + } + + exportedMethods := make(map[string]Method) + for name, method := range methods { + exportedMethods[name] = exportedMethod{method} } // Finally, save this handler - conn.handlers[path][iface] = exportedObj{ - methods: methods, - includeSubtree: includeSubtree, - } + obj := h.objects[path] + obj.AddInterface(iface, newExportedIntf(exportedMethods, includeSubtree)) return nil } diff --git a/vendor/github.com/godbus/dbus/object.go b/vendor/github.com/godbus/dbus/object.go index 9573b709..6d95583d 100644 --- a/vendor/github.com/godbus/dbus/object.go +++ b/vendor/github.com/godbus/dbus/object.go @@ -43,7 +43,8 @@ func (o *Object) AddMatchSignal(iface, member string) *Call { // will be allocated. Otherwise, ch has to be buffered or Go will panic. // // If the flags include FlagNoReplyExpected, ch is ignored and a Call structure -// is returned of which only the Err member is valid. +// is returned with any error in Err and a closed channel in Done containing +// the returned Call as its one entry.
// +// If the method parameter contains a dot ('.'), the part before the last dot +// specifies the interface on which the method is called. @@ -97,11 +98,21 @@ func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface } o.conn.outLck.RLock() defer o.conn.outLck.RUnlock() + done := make(chan *Call, 1) + call := &Call{ + Err: nil, + Done: done, + } + defer func() { + call.Done <- call + close(done) + }() if o.conn.closed { - return &Call{Err: ErrClosed} + call.Err = ErrClosed + return call } o.conn.out <- msg - return &Call{Err: nil} + return call } // GetProperty calls org.freedesktop.DBus.Properties.GetProperty on the given @@ -125,12 +136,12 @@ func (o *Object) GetProperty(p string) (Variant, error) { return result, nil } -// Destination returns the destination that calls on o are sent to. +// Destination returns the destination that calls on (o *Object) are sent to. func (o *Object) Destination() string { return o.dest } -// Path returns the path that calls on o are sent to. +// Path returns the path that calls on (o *Object) are sent to. func (o *Object) Path() ObjectPath { return o.path } diff --git a/vendor/github.com/godbus/dbus/server_interfaces.go b/vendor/github.com/godbus/dbus/server_interfaces.go new file mode 100644 index 00000000..091948ae --- /dev/null +++ b/vendor/github.com/godbus/dbus/server_interfaces.go @@ -0,0 +1,89 @@ +package dbus + +// Terminator allows a handler to implement a shutdown mechanism that +// is called when the connection terminates. +type Terminator interface { + Terminate() +} + +// Handler is the representation of a D-Bus Application. +// +// The Handler must have a way to look up objects given +// an ObjectPath. The returned object must implement the +// ServerObject interface. +type Handler interface { + LookupObject(path ObjectPath) (ServerObject, bool) +} + +// ServerObject is the representation of a D-Bus Object. +// +// Objects are registered at a path for a given Handler. +// The Objects implement D-Bus interfaces. The semantics +// of Interface lookup are up to the implementation of +// the ServerObject. The ServerObject implementation may +// choose to implement empty string as a valid interface +// representing all methods or not, per the D-Bus specification. +type ServerObject interface { + LookupInterface(name string) (Interface, bool) +} + +// An Interface is the representation of a D-Bus Interface. +// +// Interfaces are a grouping of methods implemented by the Objects. +// Interfaces are responsible for routing method calls. +type Interface interface { + LookupMethod(name string) (Method, bool) +} + +// A Method represents the exposed methods on D-Bus. +type Method interface { + // Call requires that all arguments are decoded before being passed to it. + Call(args ...interface{}) ([]interface{}, error) + NumArguments() int + NumReturns() int + // ArgumentValue returns a representative value for the argument at position; + // it should be of the proper type. reflect.Zero would be a good mechanism + // to use for this Value. + ArgumentValue(position int) interface{} + // ReturnValue returns a representative value for the return at position; + // it should be of the proper type. reflect.Zero would be a good mechanism + // to use for this Value. + ReturnValue(position int) interface{} +} + +// An ArgumentDecoder can decode arguments using a non-standard mechanism. + +// +// If a method implements this interface then the non-standard +// decoder will be used. +// +// Method arguments must be decoded from the message.
+// The mechanism for doing this will vary based on the +// implementation of the method. A normal approach is provided +// as part of this library, but may be replaced with +// any other decoding scheme. +type ArgumentDecoder interface { + // To decode the arguments of a method, the sender and message are + // provided in case the semantics of the implementer provide access + // to these as part of the method invocation. + DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error) +} + +// A SignalHandler is responsible for delivering a signal. +// +// Signal delivery may be changed from the default channel +// based approach by Handlers implementing the SignalHandler +// interface. +type SignalHandler interface { + DeliverSignal(iface, name string, signal *Signal) +} + +// A DBusError is used to convert a generic object to a D-Bus error. +// +// Any custom error mechanism may implement this interface to provide +// a custom encoding of the error on D-Bus. By default, if a normal +// error is returned, it will be encoded as the generic +// "org.freedesktop.DBus.Error.Failed" error. By implementing this +// interface as well, a custom encoding may be provided. +type DBusError interface { + DBusError() (string, []interface{}) +} diff --git a/vendor/github.com/godbus/dbus/sig.go b/vendor/github.com/godbus/dbus/sig.go index f45b53ce..c1b80920 100644 --- a/vendor/github.com/godbus/dbus/sig.go +++ b/vendor/github.com/godbus/dbus/sig.go @@ -57,12 +57,12 @@ func getSignature(t reflect.Type) string { return "n" case reflect.Uint16: return "q" - case reflect.Int32: + case reflect.Int, reflect.Int32: if t == unixFDType { return "h" } return "i" - case reflect.Uint32: + case reflect.Uint, reflect.Uint32: if t == unixFDIndexType { return "h" } @@ -101,6 +101,8 @@ func getSignature(t reflect.Type) string { panic(InvalidTypeError{t}) } return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}" + case reflect.Interface: + return "v" } panic(InvalidTypeError{t}) } @@ -162,7 +164,7 @@ func (e SignatureError) Error() string { return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason) } -// Try to read a single type from this string. If it was successfull, err is nil +// Try to read a single type from this string. If it was successful, err is nil // and rem is the remaining unparsed part. Otherwise, err is a non-nil // SignatureError and rem is "". depth is the current recursion depth which may // not be greater than 64 and should be given as 0 on the first call.
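The sig.go hunk above gives platform-sized int and uint fixed-width signature codes and maps interface{} to VARIANT, matching the doc.go table earlier in this patch. A quick sketch of the resulting signatures, assuming the patched getSignature:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// Platform-sized ints now get fixed-width D-Bus codes; note the encoder
	// truncates int/uint to 32 bits on the wire.
	fmt.Println(dbus.SignatureOf(int(42)))  // "i" (INT32)
	fmt.Println(dbus.SignatureOf(uint(42))) // "u" (UINT32)

	// interface{} elements now appear in container signatures as "v".
	fmt.Println(dbus.SignatureOf(map[string]interface{}{})) // "a{sv}"
}
```

This is what lets callers pass plain int values and map[string]interface{} payloads without converting to int32 or Variant by hand.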
diff --git a/vendor/github.com/godbus/dbus/transport_generic.go b/vendor/github.com/godbus/dbus/transport_generic.go index 46f8f49d..3fad859a 100644 --- a/vendor/github.com/godbus/dbus/transport_generic.go +++ b/vendor/github.com/godbus/dbus/transport_generic.go @@ -4,8 +4,23 @@ import ( "encoding/binary" "errors" "io" + "unsafe" ) +var nativeEndian binary.ByteOrder + +func detectEndianness() binary.ByteOrder { + var x uint32 = 0x01020304 + if *(*byte)(unsafe.Pointer(&x)) == 0x01 { + return binary.BigEndian + } + return binary.LittleEndian +} + +func init() { + nativeEndian = detectEndianness() +} + type genericTransport struct { io.ReadWriteCloser } @@ -31,5 +46,5 @@ func (t genericTransport) SendMessage(msg *Message) error { return errors.New("dbus: unix fd passing not enabled") } } - return msg.EncodeTo(t, binary.LittleEndian) + return msg.EncodeTo(t, nativeEndian) } diff --git a/vendor/github.com/godbus/dbus/transport_unix.go b/vendor/github.com/godbus/dbus/transport_unix.go index a1d00cbc..e56d5ca9 100644 --- a/vendor/github.com/godbus/dbus/transport_unix.go +++ b/vendor/github.com/godbus/dbus/transport_unix.go @@ -175,7 +175,7 @@ func (t *unixTransport) SendMessage(msg *Message) error { msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds))) oob := syscall.UnixRights(fds...) buf := new(bytes.Buffer) - msg.EncodeTo(buf, binary.LittleEndian) + msg.EncodeTo(buf, nativeEndian) n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil) if err != nil { return err @@ -184,7 +184,7 @@ func (t *unixTransport) SendMessage(msg *Message) error { return io.ErrShortWrite } } else { - if err := msg.EncodeTo(t, binary.LittleEndian); err != nil { + if err := msg.EncodeTo(t, nativeEndian); err != nil { return nil } } diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go b/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go new file mode 100644 index 00000000..0fc5b927 --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go @@ -0,0 +1,91 @@ +// The UnixCredentials system call is currently only implemented on Linux +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// https://golang.org/s/go1.4-syscall +// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys + +// Local implementation of the UnixCredentials system call for FreeBSD + +package dbus + +/* +const int sizeofPtr = sizeof(void*); +#define _WANT_UCRED +#include <sys/ucred.h> +*/ +import "C" + +import ( + "io" + "os" + "syscall" + "unsafe" ) + +// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go +// https://golang.org/src/syscall/ztypes_freebsd_amd64.go +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +// http://golang.org/src/pkg/syscall/types_linux.go +// https://golang.org/src/syscall/types_freebsd.go +// https://github.com/freebsd/freebsd/blob/master/sys/sys/ucred.h +const ( + SizeofUcred = C.sizeof_struct_ucred +) + +// http://golang.org/src/pkg/syscall/sockcmsg_unix.go +func cmsgAlignOf(salen int) int { + salign := C.sizeofPtr + + return (salen + salign - 1) & ^(salign - 1) +} + +// http://golang.org/src/pkg/syscall/sockcmsg_unix.go +func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer { + return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr))) +} + +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// UnixCredentials encodes credentials into a socket control message +// for sending to another process. This can be used for +// authentication.
+func UnixCredentials(ucred *Ucred) []byte { + b := make([]byte, syscall.CmsgSpace(SizeofUcred)) + h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = syscall.SOL_SOCKET + h.Type = syscall.SCM_CREDS + h.SetLen(syscall.CmsgLen(SizeofUcred)) + *((*Ucred)(cmsgData(h))) = *ucred + return b +} + +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// ParseUnixCredentials decodes a socket control message that contains +// credentials in a Ucred structure. To receive such a message, the +// SO_PASSCRED option must be enabled on the socket. +func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) { + if m.Header.Level != syscall.SOL_SOCKET { + return nil, syscall.EINVAL + } + if m.Header.Type != syscall.SCM_CREDS { + return nil, syscall.EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +func (t *unixTransport) SendNullByte() error { + ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} + b := UnixCredentials(ucred) + _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) + if err != nil { + return err + } + if oobn != len(b) { + return io.ErrShortWrite + } + return nil +} diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go b/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go new file mode 100644 index 00000000..af7bafdf --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go @@ -0,0 +1,14 @@ +package dbus + +import "io" + +func (t *unixTransport) SendNullByte() error { + n, _, err := t.UnixConn.WriteMsgUnix([]byte{0}, nil, nil) + if err != nil { + return err + } + if n != 1 { + return io.ErrShortWrite + } + return nil +} diff --git a/vendor/github.com/godbus/dbus/variant.go b/vendor/github.com/godbus/dbus/variant.go index b7b13ae9..0ca123b0 100644 --- a/vendor/github.com/godbus/dbus/variant.go +++ b/vendor/github.com/godbus/dbus/variant.go @@ -17,7 +17,12 @@ type Variant struct { // MakeVariant converts the given value to a Variant. It panics if v cannot be // represented as a D-Bus type. func MakeVariant(v interface{}) Variant { - return Variant{SignatureOf(v), v} + return MakeVariantWithSignature(v, SignatureOf(v)) +} + +// MakeVariantWithSignature converts the given value to a Variant. +func MakeVariantWithSignature(v interface{}, s Signature) Variant { + return Variant{s, v} } // ParseVariant parses the given string as a variant as described at diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README index b4accc0c..0ad51363 100644 --- a/vendor/github.com/gogo/protobuf/README +++ b/vendor/github.com/gogo/protobuf/README @@ -207,6 +207,50 @@ the --go_out argument to protoc: protoc --gogo_out=plugins=grpc:. *.proto +## Compatibility ## + +The library and the generated code are expected to be stable over time. +However, we reserve the right to make breaking changes without notice for the +following reasons: + +- Security. A security issue in the specification or implementation may come to + light whose resolution requires breaking compatibility. We reserve the right + to address such security issues. +- Unspecified behavior. There are some aspects of the Protocol Buffers + specification that are undefined. Programs that depend on such unspecified + behavior may break in future releases. +- Specification errors or changes. 
If it becomes necessary to address an + inconsistency, incompleteness, or change in the Protocol Buffers + specification, resolving the issue could affect the meaning or legality of + existing programs. We reserve the right to address such issues, including + updating the implementations. +- Bugs. If the library has a bug that violates the specification, a program + that depends on the buggy behavior may break if the bug is fixed. We reserve + the right to fix such bugs. +- Adding methods or fields to generated structs. These may conflict with field + names that already exist in a schema, causing applications to break. When the + code generator encounters a field in the schema that would collide with a + generated field or method name, the code generator will append an underscore + to the generated field or method name. +- Adding, removing, or changing methods or fields in generated structs that + start with `XXX`. These parts of the generated code are exported out of + necessity, but should not be considered part of the public API. +- Adding, removing, or changing unexported symbols in generated code. + +Any breaking changes outside of these will be announced 6 months in advance to +protobuf@googlegroups.com. + +You should, whenever possible, use generated code created by the `protoc-gen-go` +tool built at the same commit as the `proto` package. The `proto` package +declares package-level constants in the form `ProtoPackageIsVersionX`. +Application code and generated code may depend on one of these constants to +ensure that compilation will fail if the available version of the proto library +is too old. Whenever we make a change to the generated code that requires newer +library support, in the same commit we will increment the version number of the +generated code and declare a new package-level constant whose name incorporates +the latest version number. Removing a compatibility constant is considered a +breaking change and would be subject to the announcement policy stated above. + ## Plugins ## The `protoc-gen-go/generator` package exposes a plugin interface, diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md index 00b346f2..e97bb1ba 100644 --- a/vendor/github.com/gogo/protobuf/Readme.md +++ b/vendor/github.com/gogo/protobuf/Readme.md @@ -5,7 +5,7 @@ gogoprotobuf is a fork of golang/protobuf with extra code generation features. 
This code generation is used to achieve: - + - fast marshalling and unmarshalling - more canonical Go structures - goprotobuf compatibility @@ -20,22 +20,25 @@ Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is d These projects use gogoprotobuf: - - etcd - blog + - etcd - blog - sample proto file - spacemonkey - blog - - bazil - - badoo - - mesos-go - - heka - - cockroachdb - - go-ipfs - - rkive-go + - badoo - sample proto file + - mesos-go - sample proto file + - heka - the switch from golang/protobuf to gogo/protobuf when it was still on code.google.com + - cockroachdb - sample proto file + - go-ipfs - sample proto file + - rkive-go - sample proto file - dropbox - - srclib - sample proto file + - srclib - sample proto file - adyoulike - - cloudfoundry - - kubernetes + - cloudfoundry - sample proto file + - kubernetes - go2idl built on top of gogoprotobuf - dgraph - release notes - benchmarks - centrifugo - release notes - blog + - docker swarmkit - sample proto file + - nats.io - go-nats-streaming + - tidb - Communication between tidb and tikv + - protoactor-go - vanity command that also generates actors from service definitions Please lets us know if you are using gogoprotobuf by posting on our GoogleGroup. @@ -45,21 +48,21 @@ Please lets us know if you are using gogoprotobuf by posting on our gophercon - alecthomas' go serialization benchmarks -## Getting Started +## Getting Started There are several ways to use gogoprotobuf, but for all you need to install go and protoc. After that you can choose: - + - Speed - More Speed and more generated code - Most Speed and most customization ### Installation -To install it, you must first have Go (at least version 1.3.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.4.2, 1.5.4, 1.6.3 and 1.7 are continuously tested. +To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.7.1 and 1.8 are continuously tested. Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf). -Most versions from 2.3.1 should not give any problems, but 2.5.0, 2.6.1 and 3 are continuously tested. +Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.2.0 are continuously tested. ### Speed @@ -106,11 +109,6 @@ Install protoc-gen-gogo: go get github.com/gogo/protobuf/protoc-gen-gogo go get github.com/gogo/protobuf/gogoproto -## Proto3 - -Proto3 is supported, but the new well known types are not supported yet. -[See Proto3 Issue](https://github.com/gogo/protobuf/issues/57) for more details. - ## GRPC It works the same as golang/protobuf, simply specify the plugin. diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go index 5ecfae11..147b5ecc 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/doc.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -148,6 +148,7 @@ The enumprefix, getters and stringer extensions can be used to remove some of th - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. 
This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). Less Typing and Peace of Mind is explained in their specific plugin folders godoc: diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go index 6da0e3e7..9506b6fb 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -34,6 +34,7 @@ var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ Field: 62001, Name: "gogoproto.goproto_enum_prefix", Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix", + Filename: "gogo.proto", } var E_GoprotoEnumStringer = &proto.ExtensionDesc{ @@ -42,6 +43,7 @@ var E_GoprotoEnumStringer = &proto.ExtensionDesc{ Field: 62021, Name: "gogoproto.goproto_enum_stringer", Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer", + Filename: "gogo.proto", } var E_EnumStringer = &proto.ExtensionDesc{ @@ -50,6 +52,7 @@ var E_EnumStringer = &proto.ExtensionDesc{ Field: 62022, Name: "gogoproto.enum_stringer", Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer", + Filename: "gogo.proto", } var E_EnumCustomname = &proto.ExtensionDesc{ @@ -58,6 +61,16 @@ var E_EnumCustomname = &proto.ExtensionDesc{ Field: 62023, Name: "gogoproto.enum_customname", Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", } var E_EnumvalueCustomname = &proto.ExtensionDesc{ @@ -66,6 +79,7 @@ var E_EnumvalueCustomname = &proto.ExtensionDesc{ Field: 66001, Name: "gogoproto.enumvalue_customname", Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname", + Filename: "gogo.proto", } var E_GoprotoGettersAll = &proto.ExtensionDesc{ @@ -74,6 +88,7 @@ var E_GoprotoGettersAll = &proto.ExtensionDesc{ Field: 63001, Name: "gogoproto.goproto_getters_all", Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll", + Filename: "gogo.proto", } var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ @@ -82,6 +97,7 @@ var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ Field: 63002, Name: "gogoproto.goproto_enum_prefix_all", Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll", + Filename: "gogo.proto", } var E_GoprotoStringerAll = &proto.ExtensionDesc{ @@ -90,6 +106,7 @@ var E_GoprotoStringerAll = &proto.ExtensionDesc{ Field: 63003, Name: "gogoproto.goproto_stringer_all", Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll", + Filename: "gogo.proto", } var E_VerboseEqualAll = &proto.ExtensionDesc{ @@ -98,6 +115,7 @@ var E_VerboseEqualAll = &proto.ExtensionDesc{ Field: 63004, Name: "gogoproto.verbose_equal_all", Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll", + Filename: "gogo.proto", } var E_FaceAll = &proto.ExtensionDesc{ @@ -106,6 +124,7 @@ var E_FaceAll = &proto.ExtensionDesc{ Field: 63005, Name: "gogoproto.face_all", 
Tag: "varint,63005,opt,name=face_all,json=faceAll", + Filename: "gogo.proto", } var E_GostringAll = &proto.ExtensionDesc{ @@ -114,6 +133,7 @@ var E_GostringAll = &proto.ExtensionDesc{ Field: 63006, Name: "gogoproto.gostring_all", Tag: "varint,63006,opt,name=gostring_all,json=gostringAll", + Filename: "gogo.proto", } var E_PopulateAll = &proto.ExtensionDesc{ @@ -122,6 +142,7 @@ var E_PopulateAll = &proto.ExtensionDesc{ Field: 63007, Name: "gogoproto.populate_all", Tag: "varint,63007,opt,name=populate_all,json=populateAll", + Filename: "gogo.proto", } var E_StringerAll = &proto.ExtensionDesc{ @@ -130,6 +151,7 @@ var E_StringerAll = &proto.ExtensionDesc{ Field: 63008, Name: "gogoproto.stringer_all", Tag: "varint,63008,opt,name=stringer_all,json=stringerAll", + Filename: "gogo.proto", } var E_OnlyoneAll = &proto.ExtensionDesc{ @@ -138,6 +160,7 @@ var E_OnlyoneAll = &proto.ExtensionDesc{ Field: 63009, Name: "gogoproto.onlyone_all", Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll", + Filename: "gogo.proto", } var E_EqualAll = &proto.ExtensionDesc{ @@ -146,6 +169,7 @@ var E_EqualAll = &proto.ExtensionDesc{ Field: 63013, Name: "gogoproto.equal_all", Tag: "varint,63013,opt,name=equal_all,json=equalAll", + Filename: "gogo.proto", } var E_DescriptionAll = &proto.ExtensionDesc{ @@ -154,6 +178,7 @@ var E_DescriptionAll = &proto.ExtensionDesc{ Field: 63014, Name: "gogoproto.description_all", Tag: "varint,63014,opt,name=description_all,json=descriptionAll", + Filename: "gogo.proto", } var E_TestgenAll = &proto.ExtensionDesc{ @@ -162,6 +187,7 @@ var E_TestgenAll = &proto.ExtensionDesc{ Field: 63015, Name: "gogoproto.testgen_all", Tag: "varint,63015,opt,name=testgen_all,json=testgenAll", + Filename: "gogo.proto", } var E_BenchgenAll = &proto.ExtensionDesc{ @@ -170,6 +196,7 @@ var E_BenchgenAll = &proto.ExtensionDesc{ Field: 63016, Name: "gogoproto.benchgen_all", Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll", + Filename: "gogo.proto", } var E_MarshalerAll = &proto.ExtensionDesc{ @@ -178,6 +205,7 @@ var E_MarshalerAll = &proto.ExtensionDesc{ Field: 63017, Name: "gogoproto.marshaler_all", Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll", + Filename: "gogo.proto", } var E_UnmarshalerAll = &proto.ExtensionDesc{ @@ -186,6 +214,7 @@ var E_UnmarshalerAll = &proto.ExtensionDesc{ Field: 63018, Name: "gogoproto.unmarshaler_all", Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll", + Filename: "gogo.proto", } var E_StableMarshalerAll = &proto.ExtensionDesc{ @@ -194,6 +223,7 @@ var E_StableMarshalerAll = &proto.ExtensionDesc{ Field: 63019, Name: "gogoproto.stable_marshaler_all", Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll", + Filename: "gogo.proto", } var E_SizerAll = &proto.ExtensionDesc{ @@ -202,6 +232,7 @@ var E_SizerAll = &proto.ExtensionDesc{ Field: 63020, Name: "gogoproto.sizer_all", Tag: "varint,63020,opt,name=sizer_all,json=sizerAll", + Filename: "gogo.proto", } var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ @@ -210,6 +241,7 @@ var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ Field: 63021, Name: "gogoproto.goproto_enum_stringer_all", Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll", + Filename: "gogo.proto", } var E_EnumStringerAll = &proto.ExtensionDesc{ @@ -218,6 +250,7 @@ var E_EnumStringerAll = &proto.ExtensionDesc{ Field: 63022, Name: "gogoproto.enum_stringer_all", Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll", + Filename: "gogo.proto", } var E_UnsafeMarshalerAll = 
&proto.ExtensionDesc{ @@ -226,6 +259,7 @@ var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ Field: 63023, Name: "gogoproto.unsafe_marshaler_all", Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll", + Filename: "gogo.proto", } var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ @@ -234,6 +268,7 @@ var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ Field: 63024, Name: "gogoproto.unsafe_unmarshaler_all", Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll", + Filename: "gogo.proto", } var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ @@ -242,6 +277,7 @@ var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ Field: 63025, Name: "gogoproto.goproto_extensions_map_all", Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll", + Filename: "gogo.proto", } var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ @@ -250,6 +286,7 @@ var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ Field: 63026, Name: "gogoproto.goproto_unrecognized_all", Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll", + Filename: "gogo.proto", } var E_GogoprotoImport = &proto.ExtensionDesc{ @@ -258,6 +295,7 @@ var E_GogoprotoImport = &proto.ExtensionDesc{ Field: 63027, Name: "gogoproto.gogoproto_import", Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport", + Filename: "gogo.proto", } var E_ProtosizerAll = &proto.ExtensionDesc{ @@ -266,6 +304,7 @@ var E_ProtosizerAll = &proto.ExtensionDesc{ Field: 63028, Name: "gogoproto.protosizer_all", Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll", + Filename: "gogo.proto", } var E_CompareAll = &proto.ExtensionDesc{ @@ -274,6 +313,34 @@ var E_CompareAll = &proto.ExtensionDesc{ Field: 63029, Name: "gogoproto.compare_all", Tag: "varint,63029,opt,name=compare_all,json=compareAll", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all,json=typedeclAll", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all,json=enumdeclAll", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration,json=goprotoRegistration", + Filename: "gogo.proto", } var E_GoprotoGetters = &proto.ExtensionDesc{ @@ -282,6 +349,7 @@ var E_GoprotoGetters = &proto.ExtensionDesc{ Field: 64001, Name: "gogoproto.goproto_getters", Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters", + Filename: "gogo.proto", } var E_GoprotoStringer = &proto.ExtensionDesc{ @@ -290,6 +358,7 @@ var E_GoprotoStringer = &proto.ExtensionDesc{ Field: 64003, Name: "gogoproto.goproto_stringer", Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer", + Filename: "gogo.proto", } var E_VerboseEqual = &proto.ExtensionDesc{ @@ -298,6 +367,7 @@ var E_VerboseEqual = &proto.ExtensionDesc{ Field: 64004, Name: "gogoproto.verbose_equal", Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual", + Filename: "gogo.proto", } var E_Face = &proto.ExtensionDesc{ @@ -306,6 +376,7 @@ var E_Face = 
&proto.ExtensionDesc{ Field: 64005, Name: "gogoproto.face", Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", } var E_Gostring = &proto.ExtensionDesc{ @@ -314,6 +385,7 @@ var E_Gostring = &proto.ExtensionDesc{ Field: 64006, Name: "gogoproto.gostring", Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", } var E_Populate = &proto.ExtensionDesc{ @@ -322,6 +394,7 @@ var E_Populate = &proto.ExtensionDesc{ Field: 64007, Name: "gogoproto.populate", Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", } var E_Stringer = &proto.ExtensionDesc{ @@ -330,6 +403,7 @@ var E_Stringer = &proto.ExtensionDesc{ Field: 67008, Name: "gogoproto.stringer", Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", } var E_Onlyone = &proto.ExtensionDesc{ @@ -338,6 +412,7 @@ var E_Onlyone = &proto.ExtensionDesc{ Field: 64009, Name: "gogoproto.onlyone", Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", } var E_Equal = &proto.ExtensionDesc{ @@ -346,6 +421,7 @@ var E_Equal = &proto.ExtensionDesc{ Field: 64013, Name: "gogoproto.equal", Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", } var E_Description = &proto.ExtensionDesc{ @@ -354,6 +430,7 @@ var E_Description = &proto.ExtensionDesc{ Field: 64014, Name: "gogoproto.description", Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", } var E_Testgen = &proto.ExtensionDesc{ @@ -362,6 +439,7 @@ var E_Testgen = &proto.ExtensionDesc{ Field: 64015, Name: "gogoproto.testgen", Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", } var E_Benchgen = &proto.ExtensionDesc{ @@ -370,6 +448,7 @@ var E_Benchgen = &proto.ExtensionDesc{ Field: 64016, Name: "gogoproto.benchgen", Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", } var E_Marshaler = &proto.ExtensionDesc{ @@ -378,6 +457,7 @@ var E_Marshaler = &proto.ExtensionDesc{ Field: 64017, Name: "gogoproto.marshaler", Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", } var E_Unmarshaler = &proto.ExtensionDesc{ @@ -386,6 +466,7 @@ var E_Unmarshaler = &proto.ExtensionDesc{ Field: 64018, Name: "gogoproto.unmarshaler", Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", } var E_StableMarshaler = &proto.ExtensionDesc{ @@ -394,6 +475,7 @@ var E_StableMarshaler = &proto.ExtensionDesc{ Field: 64019, Name: "gogoproto.stable_marshaler", Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler", + Filename: "gogo.proto", } var E_Sizer = &proto.ExtensionDesc{ @@ -402,6 +484,7 @@ var E_Sizer = &proto.ExtensionDesc{ Field: 64020, Name: "gogoproto.sizer", Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", } var E_UnsafeMarshaler = &proto.ExtensionDesc{ @@ -410,6 +493,7 @@ var E_UnsafeMarshaler = &proto.ExtensionDesc{ Field: 64023, Name: "gogoproto.unsafe_marshaler", Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler", + Filename: "gogo.proto", } var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ @@ -418,6 +502,7 @@ var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ Field: 64024, Name: "gogoproto.unsafe_unmarshaler", Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler", + Filename: "gogo.proto", } var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ @@ -426,6 +511,7 @@ var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ Field: 64025, Name: "gogoproto.goproto_extensions_map", Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap", + Filename: "gogo.proto", } var E_GoprotoUnrecognized = &proto.ExtensionDesc{ @@ -434,6 +520,7 @@ var 
E_GoprotoUnrecognized = &proto.ExtensionDesc{ Field: 64026, Name: "gogoproto.goproto_unrecognized", Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized", + Filename: "gogo.proto", } var E_Protosizer = &proto.ExtensionDesc{ @@ -442,6 +529,7 @@ var E_Protosizer = &proto.ExtensionDesc{ Field: 64028, Name: "gogoproto.protosizer", Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", } var E_Compare = &proto.ExtensionDesc{ @@ -450,6 +538,16 @@ var E_Compare = &proto.ExtensionDesc{ Field: 64029, Name: "gogoproto.compare", Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", } var E_Nullable = &proto.ExtensionDesc{ @@ -458,6 +556,7 @@ var E_Nullable = &proto.ExtensionDesc{ Field: 65001, Name: "gogoproto.nullable", Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", } var E_Embed = &proto.ExtensionDesc{ @@ -466,6 +565,7 @@ var E_Embed = &proto.ExtensionDesc{ Field: 65002, Name: "gogoproto.embed", Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", } var E_Customtype = &proto.ExtensionDesc{ @@ -474,6 +574,7 @@ var E_Customtype = &proto.ExtensionDesc{ Field: 65003, Name: "gogoproto.customtype", Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", } var E_Customname = &proto.ExtensionDesc{ @@ -482,6 +583,7 @@ var E_Customname = &proto.ExtensionDesc{ Field: 65004, Name: "gogoproto.customname", Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", } var E_Jsontag = &proto.ExtensionDesc{ @@ -490,6 +592,7 @@ var E_Jsontag = &proto.ExtensionDesc{ Field: 65005, Name: "gogoproto.jsontag", Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", } var E_Moretags = &proto.ExtensionDesc{ @@ -498,6 +601,7 @@ var E_Moretags = &proto.ExtensionDesc{ Field: 65006, Name: "gogoproto.moretags", Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", } var E_Casttype = &proto.ExtensionDesc{ @@ -506,6 +610,7 @@ var E_Casttype = &proto.ExtensionDesc{ Field: 65007, Name: "gogoproto.casttype", Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", } var E_Castkey = &proto.ExtensionDesc{ @@ -514,6 +619,7 @@ var E_Castkey = &proto.ExtensionDesc{ Field: 65008, Name: "gogoproto.castkey", Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", } var E_Castvalue = &proto.ExtensionDesc{ @@ -522,6 +628,25 @@ var E_Castvalue = &proto.ExtensionDesc{ Field: 65009, Name: "gogoproto.castvalue", Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", } func init() { @@ -529,6 +654,7 @@ func init() { proto.RegisterExtension(E_GoprotoEnumStringer) proto.RegisterExtension(E_EnumStringer) proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) proto.RegisterExtension(E_EnumvalueCustomname) proto.RegisterExtension(E_GoprotoGettersAll) 
proto.RegisterExtension(E_GoprotoEnumPrefixAll) @@ -556,6 +682,9 @@ func init() { proto.RegisterExtension(E_GogoprotoImport) proto.RegisterExtension(E_ProtosizerAll) proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) proto.RegisterExtension(E_GoprotoGetters) proto.RegisterExtension(E_GoprotoStringer) proto.RegisterExtension(E_VerboseEqual) @@ -578,6 +707,7 @@ func init() { proto.RegisterExtension(E_GoprotoUnrecognized) proto.RegisterExtension(E_Protosizer) proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) proto.RegisterExtension(E_Nullable) proto.RegisterExtension(E_Embed) proto.RegisterExtension(E_Customtype) @@ -587,79 +717,88 @@ func init() { proto.RegisterExtension(E_Casttype) proto.RegisterExtension(E_Castkey) proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) } func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } var fileDescriptorGogo = []byte{ - // 1098 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xc9, 0x6f, 0x1c, 0x45, - 0x14, 0x87, 0x85, 0x70, 0xe4, 0x99, 0xe7, 0x0d, 0x8f, 0x8d, 0x09, 0x11, 0x88, 0xe4, 0xc6, 0xc9, - 0x39, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0xa3, 0x20, 0x0c, 0x23, 0x13, 0x07, 0x10, 0x87, - 0x51, 0xcf, 0xb8, 0xdc, 0x19, 0xe8, 0xee, 0x6a, 0xba, 0xba, 0xa3, 0x38, 0x37, 0x14, 0x16, 0x21, - 0xc4, 0x8e, 0x04, 0x09, 0x09, 0xcb, 0x81, 0x7d, 0x0d, 0xcb, 0x9d, 0x0b, 0x70, 0xe6, 0x7f, 0xe0, - 0x02, 0x98, 0x4d, 0xf2, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x1e, 0x8f, 0x54, 0x35, 0xb7, - 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xbf, 0x69, 0x00, 0x5f, 0xf9, 0x6a, 0x31, 0x4e, - 0x54, 0xaa, 0x1a, 0x75, 0xbc, 0xce, 0x2f, 0x8f, 0x1c, 0xf5, 0x95, 0xf2, 0x03, 0x79, 0x3c, 0xff, - 0xab, 0x93, 0x6d, 0x1f, 0xdf, 0x92, 0xba, 0x9b, 0xf4, 0xe2, 0x54, 0x25, 0xc5, 0x62, 0xf1, 0x20, - 0xcc, 0xd1, 0xe2, 0xb6, 0x8c, 0xb2, 0xb0, 0x1d, 0x27, 0x72, 0xbb, 0x77, 0xa9, 0x71, 0xd7, 0x62, - 0x41, 0x2e, 0x32, 0xb9, 0xb8, 0x16, 0x65, 0xe1, 0x43, 0x71, 0xda, 0x53, 0x91, 0x3e, 0x7c, 0xf3, - 0xb7, 0x5b, 0x8f, 0xde, 0x72, 0x6f, 0x6d, 0x63, 0x96, 0x50, 0xfc, 0x5f, 0x2b, 0x07, 0xc5, 0x06, - 0xdc, 0x5e, 0xf1, 0xe9, 0x34, 0xe9, 0x45, 0xbe, 0x4c, 0x2c, 0xc6, 0x9f, 0xc8, 0x38, 0x67, 0x18, - 0x1f, 0x26, 0x54, 0xac, 0xc2, 0xd4, 0x28, 0xae, 0x9f, 0xc9, 0x35, 0x29, 0x4d, 0x49, 0x13, 0x66, - 0x72, 0x49, 0x37, 0xd3, 0xa9, 0x0a, 0x23, 0x2f, 0x94, 0x16, 0xcd, 0x2f, 0xb9, 0xa6, 0xbe, 0x31, - 0x8d, 0xd8, 0x6a, 0x49, 0x89, 0xf3, 0x30, 0x8f, 0x9f, 0x5c, 0xf4, 0x82, 0x4c, 0x9a, 0xb6, 0x63, - 0x43, 0x6d, 0xe7, 0x71, 0x19, 0x2b, 0x7f, 0xbd, 0x32, 0x96, 0x2b, 0xe7, 0x4a, 0x81, 0xe1, 0x35, - 0x3a, 0xe1, 0xcb, 0x34, 0x95, 0x89, 0x6e, 0x7b, 0x41, 0x30, 0x64, 0x93, 0x67, 0x7a, 0x41, 0x69, - 0xbc, 0xba, 0x5b, 0xed, 0x44, 0xb3, 0x20, 0x57, 0x82, 0x40, 0x6c, 0xc2, 0x1d, 0x43, 0x3a, 0xeb, - 0xe0, 0xbc, 0x46, 0xce, 0xf9, 0x03, 0xdd, 0x45, 0x6d, 0x0b, 0xf8, 0xf3, 0xb2, 0x1f, 0x0e, 0xce, - 0x77, 0xc8, 0xd9, 0x20, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x61, 0xf6, 0xa2, 0x4c, 0x3a, 0x4a, 0xcb, - 0xb6, 0x7c, 0x2a, 0xf3, 0x02, 0x07, 0xdd, 0x75, 0xd2, 0xcd, 0x10, 0xb8, 0x86, 0x1c, 0xba, 0x4e, - 0x42, 0x6d, 0xdb, 0xeb, 0x4a, 0x07, 0xc5, 0x0d, 0x52, 0x8c, 0xe3, 0x7a, 0x44, 0x57, 0x60, 0xd2, - 0x57, 0xc5, 0x2d, 0x39, 0xe0, 0xef, 0x12, 0x3e, 0xc1, 0x0c, 0x29, 0x62, 0x15, 0x67, 0x81, 0x97, - 0xba, 0xec, 0xe0, 0x3d, 0x56, 0x30, 0x43, 0x8a, 0x11, 0xca, 0xfa, 
0x3e, 0x2b, 0xb4, 0x51, 0xcf, - 0x65, 0x98, 0x50, 0x51, 0xb0, 0xa3, 0x22, 0x97, 0x4d, 0x7c, 0x40, 0x06, 0x20, 0x04, 0x05, 0x4b, - 0x50, 0x77, 0x6d, 0xc4, 0x87, 0x84, 0xd7, 0x24, 0x77, 0xa0, 0x09, 0x33, 0x3c, 0x64, 0x7a, 0x2a, - 0x72, 0x50, 0x7c, 0x44, 0x8a, 0x69, 0x03, 0xa3, 0xdb, 0x48, 0xa5, 0x4e, 0x7d, 0xe9, 0x22, 0xf9, - 0x98, 0x6f, 0x83, 0x10, 0x2a, 0x65, 0x47, 0x46, 0xdd, 0x0b, 0x6e, 0x86, 0x4f, 0xb8, 0x94, 0xcc, - 0xa0, 0x62, 0x15, 0xa6, 0x42, 0x2f, 0xd1, 0x17, 0xbc, 0xc0, 0xa9, 0x1d, 0x9f, 0x92, 0x63, 0xb2, - 0x84, 0xa8, 0x22, 0x59, 0x34, 0x8a, 0xe6, 0x33, 0xae, 0x88, 0x81, 0xd1, 0xd1, 0xd3, 0xa9, 0xd7, - 0x09, 0x64, 0x7b, 0x14, 0xdb, 0xe7, 0x7c, 0xf4, 0x0a, 0x76, 0xdd, 0x34, 0x2e, 0x41, 0x5d, 0xf7, - 0x2e, 0x3b, 0x69, 0xbe, 0xe0, 0x4e, 0xe7, 0x00, 0xc2, 0x8f, 0xc1, 0x9d, 0x43, 0x47, 0xbd, 0x83, - 0xec, 0x4b, 0x92, 0x2d, 0x0c, 0x19, 0xf7, 0x34, 0x12, 0x46, 0x55, 0x7e, 0xc5, 0x23, 0x41, 0x0e, - 0xb8, 0x5a, 0x30, 0x9f, 0x45, 0xda, 0xdb, 0x1e, 0xad, 0x6a, 0x5f, 0x73, 0xd5, 0x0a, 0xb6, 0x52, - 0xb5, 0x73, 0xb0, 0x40, 0xc6, 0xd1, 0xfa, 0xfa, 0x0d, 0x0f, 0xd6, 0x82, 0xde, 0xac, 0x76, 0xf7, - 0x71, 0x38, 0x52, 0x96, 0xf3, 0x52, 0x2a, 0x23, 0x8d, 0x4c, 0x3b, 0xf4, 0x62, 0x07, 0xf3, 0x4d, - 0x32, 0xf3, 0xc4, 0x5f, 0x2b, 0x05, 0xeb, 0x5e, 0x8c, 0xf2, 0x47, 0xe1, 0x30, 0xcb, 0xb3, 0x28, - 0x91, 0x5d, 0xe5, 0x47, 0xbd, 0xcb, 0x72, 0xcb, 0x41, 0xfd, 0xed, 0x40, 0xab, 0x36, 0x0d, 0x1c, - 0xcd, 0x67, 0xe1, 0xb6, 0xf2, 0xf7, 0x46, 0xbb, 0x17, 0xc6, 0x2a, 0x49, 0x2d, 0xc6, 0xef, 0xb8, - 0x53, 0x25, 0x77, 0x36, 0xc7, 0xc4, 0x1a, 0x4c, 0xe7, 0x7f, 0xba, 0x3e, 0x92, 0xdf, 0x93, 0x68, - 0xaa, 0x4f, 0xd1, 0xe0, 0xe8, 0xaa, 0x30, 0xf6, 0x12, 0x97, 0xf9, 0xf7, 0x03, 0x0f, 0x0e, 0x42, - 0x8a, 0xa7, 0x6f, 0x66, 0x20, 0x89, 0x1b, 0xf7, 0x1c, 0x90, 0xac, 0x4b, 0xad, 0x3d, 0xbf, 0xf4, - 0x3c, 0xbd, 0x47, 0x67, 0xb6, 0x1a, 0xc4, 0xe2, 0x01, 0x2c, 0x4f, 0x35, 0x2e, 0xed, 0xb2, 0x2b, - 0x7b, 0x65, 0x85, 0x2a, 0x69, 0x29, 0xce, 0xc0, 0x54, 0x25, 0x2a, 0xed, 0xaa, 0x67, 0x48, 0x35, - 0x69, 0x26, 0xa5, 0x38, 0x01, 0x63, 0x18, 0x7b, 0x76, 0xfc, 0x59, 0xc2, 0xf3, 0xe5, 0xe2, 0x14, - 0xd4, 0x38, 0xee, 0xec, 0xe8, 0x73, 0x84, 0x96, 0x08, 0xe2, 0x1c, 0x75, 0x76, 0xfc, 0x79, 0xc6, - 0x19, 0x41, 0xdc, 0xbd, 0x84, 0x3f, 0xbe, 0x38, 0x46, 0xe3, 0x8a, 0x6b, 0xb7, 0x04, 0xe3, 0x94, - 0x71, 0x76, 0xfa, 0x05, 0xfa, 0x72, 0x26, 0xc4, 0x7d, 0x70, 0xc8, 0xb1, 0xe0, 0x2f, 0x11, 0x5a, - 0xac, 0x17, 0xab, 0x30, 0x61, 0xe4, 0x9a, 0x1d, 0x7f, 0x99, 0x70, 0x93, 0xc2, 0xad, 0x53, 0xae, - 0xd9, 0x05, 0xaf, 0xf0, 0xd6, 0x89, 0xc0, 0xb2, 0x71, 0xa4, 0xd9, 0xe9, 0x57, 0xb9, 0xea, 0x8c, - 0x88, 0x65, 0xa8, 0x97, 0x63, 0xca, 0xce, 0xbf, 0x46, 0x7c, 0x9f, 0xc1, 0x0a, 0x18, 0x63, 0xd2, - 0xae, 0x78, 0x9d, 0x2b, 0x60, 0x50, 0x78, 0x8c, 0x06, 0xa3, 0xcf, 0x6e, 0x7a, 0x83, 0x8f, 0xd1, - 0x40, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc9, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0x06, - 0xb3, 0xc4, 0xee, 0x78, 0x8b, 0xb7, 0x31, 0x10, 0x25, 0xa2, 0x05, 0x8d, 0x83, 0x39, 0x62, 0xf7, - 0xbd, 0x4d, 0xbe, 0xd9, 0x03, 0x31, 0x22, 0x1e, 0x81, 0x85, 0xe1, 0x19, 0x62, 0xb7, 0x5e, 0xdd, - 0x1b, 0xf8, 0xd5, 0x6f, 0x46, 0x88, 0x38, 0xd7, 0xff, 0xd5, 0x6f, 0xe6, 0x87, 0x5d, 0x7b, 0x6d, - 0xaf, 0xfa, 0x62, 0x67, 0xc6, 0x87, 0x58, 0x01, 0xe8, 0x8f, 0x6e, 0xbb, 0xeb, 0x3a, 0xb9, 0x0c, - 0x08, 0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x83, 0x8f, 0x06, 0x11, 0x62, 0x09, 0x6a, 0x51, 0x16, - 0x04, 0xf8, 0x70, 0x34, 0xee, 0x1e, 0x12, 0x13, 0x32, 0xd8, 0x62, 0xf6, 0xf7, 0x7d, 0x3a, 0x18, - 0x0c, 0x88, 0x13, 0x70, 0x48, 0x86, 0x1d, 0xb9, 0x65, 0x23, 0xff, 0xd8, 0xe7, 0x81, 0x80, 
0xab, - 0xc5, 0x32, 0x40, 0xf1, 0xd2, 0x98, 0xee, 0xc4, 0xd6, 0x6f, 0xfd, 0x73, 0xbf, 0x78, 0x07, 0x35, - 0x90, 0xbe, 0x20, 0x7f, 0xeb, 0xb4, 0x08, 0x76, 0xab, 0x82, 0xfc, 0x45, 0xf3, 0x24, 0x8c, 0x3f, - 0xa1, 0x55, 0x94, 0x7a, 0xbe, 0x8d, 0xfe, 0x8b, 0x68, 0x5e, 0x8f, 0x05, 0x0b, 0x55, 0x22, 0x53, - 0xcf, 0xd7, 0x36, 0xf6, 0x6f, 0x62, 0x4b, 0x00, 0xe1, 0xae, 0xa7, 0x53, 0x97, 0xfb, 0xfe, 0x87, - 0x61, 0x06, 0x70, 0xd3, 0x78, 0xfd, 0xa4, 0xdc, 0xb1, 0xb1, 0xff, 0xf2, 0xa6, 0x69, 0xbd, 0x38, - 0x05, 0x75, 0xbc, 0xcc, 0xdf, 0xb7, 0x6d, 0xf0, 0x7f, 0x04, 0xf7, 0x89, 0xd3, 0xc7, 0x60, 0xae, - 0xab, 0xc2, 0x41, 0xec, 0x34, 0x34, 0x55, 0x53, 0xb5, 0xf2, 0x07, 0xf1, 0xff, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00, + // 1201 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0xcb, 0x6f, 0x1c, 0x45, + 0x13, 0xc0, 0xf5, 0xe9, 0x73, 0x64, 0x6f, 0xf9, 0x85, 0xd7, 0xc6, 0x84, 0x08, 0x44, 0x72, 0xe3, + 0xe4, 0x9c, 0x22, 0x94, 0xb6, 0x22, 0xcb, 0xb1, 0x1c, 0x2b, 0x11, 0x06, 0x63, 0xe2, 0x00, 0xe2, + 0xb0, 0x9a, 0xdd, 0x6d, 0x4f, 0x06, 0x66, 0xa6, 0x87, 0x99, 0x9e, 0x28, 0xce, 0x0d, 0x85, 0x87, + 0x10, 0xe2, 0x8d, 0x04, 0x09, 0x49, 0x80, 0x03, 0xef, 0x67, 0x78, 0x1f, 0xb9, 0xf0, 0xb8, 0xf2, + 0x3f, 0x70, 0x01, 0xcc, 0xdb, 0x37, 0x5f, 0x50, 0xcd, 0x56, 0xcd, 0xf6, 0xac, 0x57, 0xea, 0xde, + 0xdb, 0xec, 0xba, 0x7f, 0xbf, 0xad, 0xa9, 0x9a, 0xae, 0xea, 0x31, 0x80, 0xaf, 0x7c, 0x35, 0x97, + 0xa4, 0x4a, 0xab, 0x7a, 0x0d, 0xaf, 0x8b, 0xcb, 0x03, 0x07, 0x7d, 0xa5, 0xfc, 0x50, 0x1e, 0x2e, + 0x3e, 0x35, 0xf3, 0xcd, 0xc3, 0x6d, 0x99, 0xb5, 0xd2, 0x20, 0xd1, 0x2a, 0xed, 0x2c, 0x16, 0x77, + 0xc1, 0x34, 0x2d, 0x6e, 0xc8, 0x38, 0x8f, 0x1a, 0x49, 0x2a, 0x37, 0x83, 0xf3, 0xf5, 0x5b, 0xe6, + 0x3a, 0xe4, 0x1c, 0x93, 0x73, 0xcb, 0x71, 0x1e, 0xdd, 0x9d, 0xe8, 0x40, 0xc5, 0xd9, 0xfe, 0xeb, + 0x3f, 0xff, 0xff, 0xe0, 0xff, 0x6e, 0x1f, 0x59, 0x9f, 0x22, 0x14, 0xff, 0xb6, 0x56, 0x80, 0x62, + 0x1d, 0x6e, 0xac, 0xf8, 0x32, 0x9d, 0x06, 0xb1, 0x2f, 0x53, 0x8b, 0xf1, 0x3b, 0x32, 0x4e, 0x1b, + 0xc6, 0x7b, 0x09, 0x15, 0x4b, 0x30, 0x3e, 0x88, 0xeb, 0x7b, 0x72, 0x8d, 0x49, 0x53, 0xb2, 0x02, + 0x93, 0x85, 0xa4, 0x95, 0x67, 0x5a, 0x45, 0xb1, 0x17, 0x49, 0x8b, 0xe6, 0x87, 0x42, 0x53, 0x5b, + 0x9f, 0x40, 0x6c, 0xa9, 0xa4, 0x84, 0x80, 0x11, 0xfc, 0xa6, 0x2d, 0x5b, 0xa1, 0xc5, 0xf0, 0x23, + 0x05, 0x52, 0xae, 0x17, 0x67, 0x60, 0x06, 0xaf, 0xcf, 0x79, 0x61, 0x2e, 0xcd, 0x48, 0x0e, 0xf5, + 0xf5, 0x9c, 0xc1, 0x65, 0x2c, 0xfb, 0xe9, 0xe2, 0x50, 0x11, 0xce, 0x74, 0x29, 0x30, 0x62, 0x32, + 0xaa, 0xe8, 0x4b, 0xad, 0x65, 0x9a, 0x35, 0xbc, 0xb0, 0x5f, 0x78, 0x27, 0x82, 0xb0, 0x34, 0x5e, + 0xda, 0xae, 0x56, 0x71, 0xa5, 0x43, 0x2e, 0x86, 0xa1, 0xd8, 0x80, 0x9b, 0xfa, 0x3c, 0x15, 0x0e, + 0xce, 0xcb, 0xe4, 0x9c, 0xd9, 0xf3, 0x64, 0xa0, 0x76, 0x0d, 0xf8, 0xfb, 0xb2, 0x96, 0x0e, 0xce, + 0xd7, 0xc8, 0x59, 0x27, 0x96, 0x4b, 0x8a, 0xc6, 0x53, 0x30, 0x75, 0x4e, 0xa6, 0x4d, 0x95, 0xc9, + 0x86, 0x7c, 0x24, 0xf7, 0x42, 0x07, 0xdd, 0x15, 0xd2, 0x4d, 0x12, 0xb8, 0x8c, 0x1c, 0xba, 0x8e, + 0xc2, 0xc8, 0xa6, 0xd7, 0x92, 0x0e, 0x8a, 0xab, 0xa4, 0x18, 0xc6, 0xf5, 0x88, 0x2e, 0xc2, 0x98, + 0xaf, 0x3a, 0xb7, 0xe4, 0x80, 0x5f, 0x23, 0x7c, 0x94, 0x19, 0x52, 0x24, 0x2a, 0xc9, 0x43, 0x4f, + 0xbb, 0x44, 0xf0, 0x3a, 0x2b, 0x98, 0x21, 0xc5, 0x00, 0x69, 0x7d, 0x83, 0x15, 0x99, 0x91, 0xcf, + 0x05, 0x18, 0x55, 0x71, 0xb8, 0xa5, 0x62, 0x97, 0x20, 0xde, 0x24, 0x03, 0x10, 0x82, 0x82, 0x79, + 0xa8, 0xb9, 0x16, 0xe2, 0xad, 0x6d, 0xde, 0x1e, 0x5c, 0x81, 0x15, 0x98, 0xe4, 0x06, 0x15, 0xa8, + 0xd8, 
0x41, 0xf1, 0x36, 0x29, 0x26, 0x0c, 0x8c, 0x6e, 0x43, 0xcb, 0x4c, 0xfb, 0xd2, 0x45, 0xf2, + 0x0e, 0xdf, 0x06, 0x21, 0x94, 0xca, 0xa6, 0x8c, 0x5b, 0x67, 0xdd, 0x0c, 0xef, 0x72, 0x2a, 0x99, + 0x41, 0xc5, 0x12, 0x8c, 0x47, 0x5e, 0x9a, 0x9d, 0xf5, 0x42, 0xa7, 0x72, 0xbc, 0x47, 0x8e, 0xb1, + 0x12, 0xa2, 0x8c, 0xe4, 0xf1, 0x20, 0x9a, 0xf7, 0x39, 0x23, 0x06, 0x46, 0x5b, 0x2f, 0xd3, 0x5e, + 0x33, 0x94, 0x8d, 0x41, 0x6c, 0x1f, 0xf0, 0xd6, 0xeb, 0xb0, 0xab, 0xa6, 0x71, 0x1e, 0x6a, 0x59, + 0x70, 0xc1, 0x49, 0xf3, 0x21, 0x57, 0xba, 0x00, 0x10, 0x7e, 0x00, 0x6e, 0xee, 0x3b, 0x26, 0x1c, + 0x64, 0x1f, 0x91, 0x6c, 0xb6, 0xcf, 0xa8, 0xa0, 0x96, 0x30, 0xa8, 0xf2, 0x63, 0x6e, 0x09, 0xb2, + 0xc7, 0xb5, 0x06, 0x33, 0x79, 0x9c, 0x79, 0x9b, 0x83, 0x65, 0xed, 0x13, 0xce, 0x5a, 0x87, 0xad, + 0x64, 0xed, 0x34, 0xcc, 0x92, 0x71, 0xb0, 0xba, 0x7e, 0xca, 0x8d, 0xb5, 0x43, 0x6f, 0x54, 0xab, + 0xfb, 0x20, 0x1c, 0x28, 0xd3, 0x79, 0x5e, 0xcb, 0x38, 0x43, 0xa6, 0x11, 0x79, 0x89, 0x83, 0xf9, + 0x3a, 0x99, 0xb9, 0xe3, 0x2f, 0x97, 0x82, 0x55, 0x2f, 0x41, 0xf9, 0xfd, 0xb0, 0x9f, 0xe5, 0x79, + 0x9c, 0xca, 0x96, 0xf2, 0xe3, 0xe0, 0x82, 0x6c, 0x3b, 0xa8, 0x3f, 0xeb, 0x29, 0xd5, 0x86, 0x81, + 0xa3, 0xf9, 0x24, 0xdc, 0x50, 0x9e, 0x55, 0x1a, 0x41, 0x94, 0xa8, 0x54, 0x5b, 0x8c, 0x9f, 0x73, + 0xa5, 0x4a, 0xee, 0x64, 0x81, 0x89, 0x65, 0x98, 0x28, 0x3e, 0xba, 0x3e, 0x92, 0x5f, 0x90, 0x68, + 0xbc, 0x4b, 0x51, 0xe3, 0x68, 0xa9, 0x28, 0xf1, 0x52, 0x97, 0xfe, 0xf7, 0x25, 0x37, 0x0e, 0x42, + 0xa8, 0x71, 0xe8, 0xad, 0x44, 0xe2, 0xb4, 0x77, 0x30, 0x7c, 0xc5, 0x8d, 0x83, 0x19, 0x52, 0xf0, + 0x81, 0xc1, 0x41, 0xf1, 0x35, 0x2b, 0x98, 0x41, 0xc5, 0x3d, 0xdd, 0x41, 0x9b, 0x4a, 0x3f, 0xc8, + 0x74, 0xea, 0xe1, 0x6a, 0x8b, 0xea, 0x9b, 0xed, 0xea, 0x21, 0x6c, 0xdd, 0x40, 0xc5, 0x29, 0x98, + 0xec, 0x39, 0x62, 0xd4, 0x6f, 0xdb, 0x63, 0x5b, 0x95, 0x59, 0xe6, 0xf9, 0xa5, 0xf0, 0xd1, 0x1d, + 0x6a, 0x46, 0xd5, 0x13, 0x86, 0xb8, 0x13, 0xeb, 0x5e, 0x3d, 0x07, 0xd8, 0x65, 0x17, 0x77, 0xca, + 0xd2, 0x57, 0x8e, 0x01, 0xe2, 0x04, 0x8c, 0x57, 0xce, 0x00, 0x76, 0xd5, 0x63, 0xa4, 0x1a, 0x33, + 0x8f, 0x00, 0xe2, 0x08, 0x0c, 0xe1, 0x3c, 0xb7, 0xe3, 0x8f, 0x13, 0x5e, 0x2c, 0x17, 0xc7, 0x60, + 0x84, 0xe7, 0xb8, 0x1d, 0x7d, 0x82, 0xd0, 0x12, 0x41, 0x9c, 0x67, 0xb8, 0x1d, 0x7f, 0x92, 0x71, + 0x46, 0x10, 0x77, 0x4f, 0xe1, 0xb7, 0x4f, 0x0f, 0x51, 0x1f, 0xe6, 0xdc, 0xcd, 0xc3, 0x30, 0x0d, + 0x6f, 0x3b, 0xfd, 0x14, 0xfd, 0x38, 0x13, 0xe2, 0x0e, 0xd8, 0xe7, 0x98, 0xf0, 0x67, 0x08, 0xed, + 0xac, 0x17, 0x4b, 0x30, 0x6a, 0x0c, 0x6c, 0x3b, 0xfe, 0x2c, 0xe1, 0x26, 0x85, 0xa1, 0xd3, 0xc0, + 0xb6, 0x0b, 0x9e, 0xe3, 0xd0, 0x89, 0xc0, 0xb4, 0xf1, 0xac, 0xb6, 0xd3, 0xcf, 0x73, 0xd6, 0x19, + 0x11, 0x0b, 0x50, 0x2b, 0xfb, 0xaf, 0x9d, 0x7f, 0x81, 0xf8, 0x2e, 0x83, 0x19, 0x30, 0xfa, 0xbf, + 0x5d, 0xf1, 0x22, 0x67, 0xc0, 0xa0, 0x70, 0x1b, 0xf5, 0xce, 0x74, 0xbb, 0xe9, 0x25, 0xde, 0x46, + 0x3d, 0x23, 0x1d, 0xab, 0x59, 0xb4, 0x41, 0xbb, 0xe2, 0x65, 0xae, 0x66, 0xb1, 0x1e, 0xc3, 0xe8, + 0x1d, 0x92, 0x76, 0xc7, 0x2b, 0x1c, 0x46, 0xcf, 0x8c, 0x14, 0x6b, 0x50, 0xdf, 0x3b, 0x20, 0xed, + 0xbe, 0x57, 0xc9, 0x37, 0xb5, 0x67, 0x3e, 0x8a, 0xfb, 0x60, 0xb6, 0xff, 0x70, 0xb4, 0x5b, 0x2f, + 0xed, 0xf4, 0xbc, 0xce, 0x98, 0xb3, 0x51, 0x9c, 0xee, 0x76, 0x59, 0x73, 0x30, 0xda, 0xb5, 0x97, + 0x77, 0xaa, 0x8d, 0xd6, 0x9c, 0x8b, 0x62, 0x11, 0xa0, 0x3b, 0x93, 0xec, 0xae, 0x2b, 0xe4, 0x32, + 0x20, 0xdc, 0x1a, 0x34, 0x92, 0xec, 0xfc, 0x55, 0xde, 0x1a, 0x44, 0xe0, 0xd6, 0xe0, 0x69, 0x64, + 0xa7, 0xaf, 0xf1, 0xd6, 0x60, 0x44, 0xcc, 0xc3, 0x48, 0x9c, 0x87, 0x21, 0x3e, 0x5b, 0xf5, 0x5b, + 0xfb, 0x8c, 0x1b, 0x19, 0xb6, 
0x19, 0xfe, 0x65, 0x97, 0x60, 0x06, 0xc4, 0x11, 0xd8, 0x27, 0xa3, + 0xa6, 0x6c, 0xdb, 0xc8, 0x5f, 0x77, 0xb9, 0x9f, 0xe0, 0x6a, 0xb1, 0x00, 0xd0, 0x79, 0x99, 0xc6, + 0x28, 0x6c, 0xec, 0x6f, 0xbb, 0x9d, 0xf7, 0x7a, 0x03, 0xe9, 0x0a, 0x8a, 0xb7, 0x71, 0x8b, 0x60, + 0xbb, 0x2a, 0x28, 0x5e, 0xc0, 0x8f, 0xc2, 0xf0, 0x43, 0x99, 0x8a, 0xb5, 0xe7, 0xdb, 0xe8, 0xdf, + 0x89, 0xe6, 0xf5, 0x98, 0xb0, 0x48, 0xa5, 0x52, 0x7b, 0x7e, 0x66, 0x63, 0xff, 0x20, 0xb6, 0x04, + 0x10, 0x6e, 0x79, 0x99, 0x76, 0xb9, 0xef, 0x3f, 0x19, 0x66, 0x00, 0x83, 0xc6, 0xeb, 0x87, 0xe5, + 0x96, 0x8d, 0xfd, 0x8b, 0x83, 0xa6, 0xf5, 0xe2, 0x18, 0xd4, 0xf0, 0xb2, 0xf8, 0x3f, 0x84, 0x0d, + 0xfe, 0x9b, 0xe0, 0x2e, 0x81, 0xbf, 0x9c, 0xe9, 0xb6, 0x0e, 0xec, 0xc9, 0xfe, 0x87, 0x2a, 0xcd, + 0xeb, 0xc5, 0x22, 0x8c, 0x66, 0xba, 0xdd, 0xce, 0xe9, 0x44, 0x63, 0xc1, 0xff, 0xdd, 0x2d, 0x5f, + 0x72, 0x4b, 0xe6, 0xf8, 0x21, 0x98, 0x6e, 0xa9, 0xa8, 0x17, 0x3c, 0x0e, 0x2b, 0x6a, 0x45, 0xad, + 0x15, 0xbb, 0xe8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x9c, 0xec, 0xd8, 0x50, 0x13, 0x00, + 0x00, } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto index 18a58c5d..fbca44cd 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -39,6 +39,7 @@ extend google.protobuf.EnumOptions { optional bool goproto_enum_stringer = 62021; optional bool enum_stringer = 62022; optional string enum_customname = 62023; + optional bool enumdecl = 62024; } extend google.protobuf.EnumValueOptions { @@ -77,6 +78,10 @@ extend google.protobuf.FileOptions { optional bool gogoproto_import = 63027; optional bool protosizer_all = 63028; optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; } extend google.protobuf.MessageOptions { @@ -107,6 +112,8 @@ extend google.protobuf.MessageOptions { optional bool protosizer = 64028; optional bool compare = 64029; + + optional bool typedecl = 64030; } extend google.protobuf.FieldOptions { @@ -119,4 +126,7 @@ extend google.protobuf.FieldOptions { optional string casttype = 65007; optional string castkey = 65008; optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go index 670021fe..6b851c56 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -39,6 +39,14 @@ func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { return proto.GetBoolExtension(field.Options, E_Nullable, true) } +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { nullable := IsNullable(field) if field.IsMessage() || IsCustomType(field) { @@ -82,7 +90,18 @@ func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { return false } +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + 
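The enumdecl and typedecl options introduced above follow gogoproto's usual precedence chain: a per-descriptor option wins, then the file-wide *_all option, and the default is true, so generation is unchanged unless a proto explicitly opts out. HasEnumDecl (above) and HasTypeDecl (just below) both express this with nested proto.GetBoolExtension calls. A minimal sketch of that resolution order, outside the vendored tree; the hand-built descriptors here exist purely for illustration:

package main

import (
    "fmt"

    "github.com/gogo/protobuf/gogoproto"
    "github.com/gogo/protobuf/proto"
    descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
    // File-wide default: suppress generated type declarations.
    fileOpts := &descriptor.FileOptions{}
    if err := proto.SetExtension(fileOpts, gogoproto.E_TypedeclAll, proto.Bool(false)); err != nil {
        panic(err)
    }
    file := &descriptor.FileDescriptorProto{Options: fileOpts}

    // Per-message override: this one message opts back in.
    msgOpts := &descriptor.MessageOptions{}
    if err := proto.SetExtension(msgOpts, gogoproto.E_Typedecl, proto.Bool(true)); err != nil {
        panic(err)
    }

    fmt.Println(gogoproto.HasTypeDecl(file, &descriptor.DescriptorProto{Options: msgOpts})) // true: message option wins
    fmt.Println(gogoproto.HasTypeDecl(file, &descriptor.DescriptorProto{}))                 // false: falls back to typedecl_all
}

With no options set anywhere, both helpers report true, which preserves the old behavior of always emitting declarations.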
+func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Customtype) if err == nil && v.(*string) != nil { @@ -93,6 +112,9 @@ func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { } func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Casttype) if err == nil && v.(*string) != nil { @@ -103,6 +125,9 @@ func GetCastType(field *google_protobuf.FieldDescriptorProto) string { } func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Castkey) if err == nil && v.(*string) != nil { @@ -113,6 +138,9 @@ func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { } func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Castvalue) if err == nil && v.(*string) != nil { @@ -147,6 +175,9 @@ func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool } func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Customname) if err == nil && v.(*string) != nil { @@ -157,6 +188,9 @@ func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { } func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_EnumCustomname) if err == nil && v.(*string) != nil { @@ -167,6 +201,9 @@ func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { } func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) if err == nil && v.(*string) != nil { @@ -177,6 +214,9 @@ func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) str } func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Jsontag) if err == nil && v.(*string) != nil { @@ -187,6 +227,9 @@ func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { } func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Moretags) if err == nil && v.(*string) != nil { @@ -308,3 +351,7 @@ func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) } + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, 
E_GoprotoRegistration, false) +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go index 0d6634cc..737f2731 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 for shift := uint(0); shift < 64; shift += 7 { if n >= len(buf) { return 0, 0 @@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) { return 0, 0 } -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { i := p.index l := len(p.buf) @@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { return } +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + // DecodeFixed64 reads a 64-bit integer from the Buffer. // This is the format for the // fixed64, sfixed64, and double protocol buffer types. @@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be // unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. 
if u, ok := pb.(Unmarshaler); ok { diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go index ecc63873..6fb74de4 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go @@ -98,7 +98,7 @@ func setPtrCustomType(base structPointer, f field, v interface{}) { if v == nil { return } - structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer())) + structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) } func setCustomType(base structPointer, f field, value interface{}) { @@ -165,7 +165,8 @@ func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error } newBas := appendStructPointer(base, p.field, p.ctype) - setCustomType(newBas, 0, custom) + var zero field + setCustomType(newBas, zero, custom) return nil } diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 00000000..93464c91 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). 
+func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// durationFromProto converts a Duration to a time.Duration. durationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// durationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 00000000..18e2a5f7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,203 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
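durationProto above performs the canonical seconds/nanos split (truncating division keeps Seconds and Nanos on the same sign, the invariant validateDuration checks), and durationFromProto reverses it for any value that fits in a time.Duration, i.e. roughly 290 years either side of zero, which is what the "about 290" in the comment above refers to. The helpers are unexported, so here is a standard-library-only sketch of the same arithmetic; all names are illustrative:

package main

import (
    "fmt"
    "time"
)

func main() {
    d := -(2*time.Second + 500*time.Millisecond)

    // The durationProto split: Go's integer division truncates toward zero,
    // so secs and nanos carry the same sign as the input.
    nanos := d.Nanoseconds()
    secs := nanos / 1e9
    nanos -= secs * 1e9

    fmt.Println(secs, nanos) // -2 -500000000
}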
+ +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} + +func (o *Buffer) decDuration() (time.Duration, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return 0, err + } + dproto := &duration{} + if err := Unmarshal(b, dproto); err != nil { + return 0, err + } + return durationFromProto(dproto) +} + +func (o *Buffer) dec_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) + var zero field + setPtrCustomType(newBas, zero, &d) + return nil +} + +func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + structPointer_Word64Slice(base, p.field).Append(uint64(d)) + return nil +} + +func size_duration(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_duration(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_duration(p *Properties, base structPointer) (n int) { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return 0 + } + dproto := durationProto(*durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return errRepeatedHasNil + } + dproto := durationProto(*durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go index 8c1b8fd1..2b30f846 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -234,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) { } p := NewBuffer(nil) err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } if p.buf == nil && err == nil { // Return a non-nil slice on success. return []byte{}, nil @@ -266,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error { // Can the object marshal itself? if m, ok := pb.(Marshaler); ok { data, err := m.Marshal() - if err != nil { - return err - } p.buf = append(p.buf, data...) - return nil + return err } t, base, err := getbase(pb) @@ -282,7 +275,7 @@ func (p *Buffer) Marshal(pb Message) error { } if collectStats { - stats.Encode++ + (stats).Encode++ // Parens are to work around a goimports bug. } if len(p.buf) > maxMarshalSize { @@ -309,7 +302,7 @@ func Size(pb Message) (n int) { } if collectStats { - stats.Size++ + (stats).Size++ // Parens are to work around a goimports bug. 
} return @@ -1014,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) { if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, _ := m.Marshal() - n += len(p.tagcode) n += sizeRawBytes(data) continue } @@ -1083,10 +1075,17 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error { func (o *Buffer) enc_exts(p *Properties, base structPointer) error { exts := structPointer_Extensions(base, p.field) - if err := encodeExtensions(exts); err != nil { + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { return err } - v, _ := exts.extensionsRead() return o.enc_map_body(v) } diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go index 66e7e163..32111b7f 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -196,12 +196,10 @@ func size_ref_struct_message(p *Properties, base structPointer) int { // Encode a slice of references to message struct pointers ([]struct). func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { var state errorState - ss := structPointer_GetStructPointer(base, p.field) - ss1 := structPointer_GetRefStructPointer(ss, field(0)) - size := p.stype.Size() - l := structPointer_Len(base, p.field) + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() for i := 0; i < l; i++ { - structp := structPointer_Add(ss1, field(uintptr(i)*size)) + structp := ss.Index(i) if structPointer_IsNil(structp) { return errRepeatedHasNil } @@ -233,13 +231,11 @@ func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) //TODO this is only copied, please fix this func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { - ss := structPointer_GetStructPointer(base, p.field) - ss1 := structPointer_GetRefStructPointer(ss, field(0)) - size := p.stype.Size() - l := structPointer_Len(base, p.field) + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() n += l * len(p.tagcode) for i := 0; i < l; i++ { - structp := structPointer_Add(ss1, field(uintptr(i)*size)) + structp := ss.Index(i) if structPointer_IsNil(structp) { return // return the size up to this point } diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go index 8b16f951..2ed1cf59 100644 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -54,13 +54,17 @@ Equality is defined in this way: in a proto3 .proto file, fields are not "set"; specifically, zero length proto3 "bytes" fields are equal (nil == {}). - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal (a "bytes" field, - although represented by []byte, is not a repeated field) + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. - Two unset fields are equal. - Two unknown field sets are equal if their current encoded state is equal. - Two extension sets are equal iff they have corresponding elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. 
- Every other combination of things are not equal. The return value is undefined if a and b are not protocol buffers. diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go index f7384baa..0dfcb538 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -167,6 +167,7 @@ type ExtensionDesc struct { Field int32 // field number Name string // fully-qualified name of extension, for text formatting Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined } func (ed *ExtensionDesc) repeated() bool { @@ -587,6 +588,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { registeredExtensions := RegisteredExtensions(pb) emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } mu.Lock() defer mu.Unlock() extensions := make([]*ExtensionDesc, 0, len(emap)) diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go index 2c30d709..7580bb45 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -308,7 +308,7 @@ func GetStats() Stats { return stats } // temporary Buffer and are fine for most applications. type Buffer struct { buf []byte // encode/decode byte stream - index int // write point + index int // read point // pools of basic types to amortize allocation. bools []bool diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 00000000..1763a5f2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,85 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
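The map-field rule spelled out in the proto.Equal contract above (equal lengths, same element set, nil and zero-length maps indistinguishable) can be exercised without generated code. The labeled type and its struct tags below are a hand-rolled, illustrative stand-in for protoc output, not part of the vendored package:

package main

import (
    "fmt"

    "github.com/gogo/protobuf/proto"
)

// labeled mimics a generated message with one map<string, string> field.
type labeled struct {
    Labels map[string]string `protobuf:"bytes,1,rep,name=labels" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}

func (m *labeled) Reset()         { *m = labeled{} }
func (m *labeled) String() string { return proto.CompactTextString(m) }
func (*labeled) ProtoMessage()    {}

func main() {
    fmt.Println(proto.Equal(&labeled{Labels: map[string]string{}}, &labeled{})) // true: zero-length map fields are equal

    a := &labeled{Labels: map[string]string{"k": "v"}}
    b := &labeled{Labels: map[string]string{"k": "w"}}
    fmt.Println(proto.Equal(a, b)) // false: same keys, different elements
}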
+ +// +build appengine js + +package proto + +import ( + "reflect" +) + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + panic("not implemented") +} + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func structPointer_Add(p structPointer, size field) structPointer { + panic("not implemented") +} + +func structPointer_Len(p structPointer, f field) int { + panic("not implemented") +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + panic("not implemented") +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + panic("not implemented") +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + panic("not implemented") +} + +type structRefSlice struct{} + +func (v *structRefSlice) Len() int { + panic("not implemented") +} + +func (v *structRefSlice) Index(i int) structPointer { + panic("not implemented") +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go index ad7c8517..f156a29f 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -26,7 +26,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine +// +build !appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -105,3 +105,24 @@ func structPointer_Add(p structPointer, size field) structPointer { func structPointer_Len(p structPointer, f field) int { return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) } + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + return &structRefSlice{p: p, f: f, size: size} +} + +// A structRefSlice represents a slice of structs (themselves submessages or groups). 
+type structRefSlice struct { + p structPointer + f field + size uintptr +} + +func (v *structRefSlice) Len() int { + return structPointer_Len(v.p, v.f) +} + +func (v *structRefSlice) Index(i int) structPointer { + ss := structPointer_GetStructPointer(v.p, v.f) + ss1 := structPointer_GetRefStructPointer(ss, 0) + return structPointer_Add(ss1, field(uintptr(i)*v.size)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go index 3e4cad03..44b33205 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -190,10 +190,11 @@ type Properties struct { proto3 bool // whether this is known to be a proto3 field; set for []byte only oneof bool // whether this is a oneof field - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - def_uint64 uint64 + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + StdTime bool + StdDuration bool enc encoder valEnc valueEncoder // set for bool and numeric types only @@ -340,6 +341,10 @@ func (p *Properties) Parse(s string) { p.OrigName = strings.Split(f, "=")[1] case strings.HasPrefix(f, "customtype="): p.CustomType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true } } } @@ -355,11 +360,22 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.enc = nil p.dec = nil p.size = nil - if len(p.CustomType) > 0 { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { p.setCustomEncAndDec(typ) p.setTag(lockGetProp) return } + if p.StdTime && !isMap { + p.setTimeEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setDurationEncAndDec(typ) + p.setTag(lockGetProp) + return + } switch t1 := typ; t1.Kind() { default: fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) @@ -630,6 +646,10 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock // so we need encoders for the pointer to this type. vtype = reflect.PtrTo(vtype) } + + p.mvalprop.CustomType = p.CustomType + p.mvalprop.StdDuration = p.StdDuration + p.mvalprop.StdTime = p.StdTime p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } p.setTag(lockGetProp) @@ -920,7 +940,15 @@ func RegisterType(x Message, name string) { } // MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} // MessageType returns the message type (pointer to struct) for a named message. 
func MessageType(name string) reflect.Type { return protoTypes[name] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go index 4607a975..b6b7176c 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -51,6 +51,51 @@ func (p *Properties) setCustomEncAndDec(typ reflect.Type) { } } +func (p *Properties) setDurationEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_duration + p.dec = (*Buffer).dec_slice_duration + p.size = size_slice_duration + } else { + p.enc = (*Buffer).enc_slice_ref_duration + p.dec = (*Buffer).dec_slice_ref_duration + p.size = size_slice_ref_duration + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_duration + p.dec = (*Buffer).dec_duration + p.size = size_duration + } else { + p.enc = (*Buffer).enc_ref_duration + p.dec = (*Buffer).dec_ref_duration + p.size = size_ref_duration + } +} + +func (p *Properties) setTimeEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_time + p.dec = (*Buffer).dec_slice_time + p.size = size_slice_time + } else { + p.enc = (*Buffer).enc_slice_ref_time + p.dec = (*Buffer).dec_slice_ref_time + p.size = size_slice_ref_time + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_time + p.dec = (*Buffer).dec_time + p.size = size_time + } else { + p.enc = (*Buffer).enc_ref_time + p.dec = (*Buffer).dec_ref_time + p.size = size_ref_time + } + +} + func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { t2 := typ.Elem() p.sstype = typ diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go index b3e12e26..d63732fc 100644 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -51,6 +51,7 @@ import ( "sort" "strings" "sync" + "time" ) var ( @@ -181,7 +182,93 @@ type raw interface { Bytes() []byte } -func writeStruct(w *textWriter, sv reflect.Value) error { +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { @@ -234,10 +321,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error { continue } if len(props.Enum) > 0 { - if err := writeEnum(w, v, props); err != nil { + if err := tm.writeEnum(w, v, props); err != nil { return err } - } else if err := writeAny(w, v, props); err != nil { + } else if err := tm.writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -279,7 +366,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -296,7 +383,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -368,10 +455,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } if len(props.Enum) > 0 { - if err := writeEnum(w, fv, props); err != nil { + if err := tm.writeEnum(w, fv, props); err != nil { return err } - } else if err := writeAny(w, fv, props); err != nil { + } else if err := tm.writeAny(w, fv, props); err != nil { return err } @@ -389,7 +476,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { pv.Elem().Set(sv) } if pv.Type().Implements(extensionRangeType) { - if err := writeExtensions(w, pv); err != nil { + if err := tm.writeExtensions(w, pv); err != nil { return err } } @@ -419,20 +506,45 @@ func writeRaw(w *textWriter, b []byte) error { } // writeAny writes an arbitrary field. 
-func writeAny(w *textWriter, v reflect.Value, props *Properties) error { +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) - if props != nil && len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) if err != nil { return err } - if err := writeString(w, string(data)); err != nil { - return err + props.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), props) + props.StdTime = true + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdduration is not time.Duration, but %T", v.Interface()) } - return nil + dproto := durationProto(d) + props.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), props) + props.StdDuration = true + return err } } @@ -482,15 +594,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error { } } w.indent() - if tm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } if _, err = w.Write(text); err != nil { return err } - } else if err := writeStruct(w, v); err != nil { + } else if err := tm.writeStruct(w, v); err != nil { return err } w.unindent() @@ -634,7 +746,7 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // writeExtensions writes all the extensions in pv. // pv is assumed to be a pointer to a protocol message struct that is extendable. -func writeExtensions(w *textWriter, pv reflect.Value) error { +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] e := pv.Interface().(Message) @@ -689,13 +801,13 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { // Repeated extensions will appear as a slice. if !desc.repeated() { - if err := writeExtension(w, desc.Name, pb); err != nil { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { return err } } else { v := reflect.ValueOf(pb) for i := 0; i < v.Len(); i++ { - if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { return err } } @@ -704,7 +816,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { return nil } -func writeExtension(w *textWriter, name string, pb interface{}) error { +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { return err } @@ -713,7 +825,7 @@ func writeExtension(w *textWriter, name string, pb interface{}) error { return err } } - if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -740,12 +852,13 @@ func (w *textWriter) writeIndent() { // TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct { - Compact bool // use compact text format (one line). + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types } // Marshal writes a given protocol buffer in text format. // The only errors returned are from w. -func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("")) @@ -760,11 +873,11 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { aw := &textWriter{ w: ww, complete: true, - compact: m.Compact, + compact: tm.Compact, } - if tm, ok := pb.(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } @@ -778,7 +891,7 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) - if err := writeStruct(aw, v); err != nil { + if err := tm.writeStruct(aw, v); err != nil { return err } if bw != nil { @@ -788,9 +901,9 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { } // Text is the same as Marshal, but returns the string directly. -func (m *TextMarshaler) Text(pb Message) string { +func (tm *TextMarshaler) Text(pb Message) string { var buf bytes.Buffer - m.Marshal(&buf, pb) + tm.Marshal(&buf, pb) return buf.String() } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go index 58926741..1d6c6aa0 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -33,10 +33,10 @@ import ( "reflect" ) -func writeEnum(w *textWriter, v reflect.Value, props *Properties) error { +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { m, ok := enumStringMaps[props.Enum] if !ok { - if err := writeAny(w, v, props); err != nil { + if err := tm.writeAny(w, v, props); err != nil { return err } } @@ -48,7 +48,7 @@ func writeEnum(w *textWriter, v reflect.Value, props *Properties) error { } s, ok := m[key] if !ok { - if err := writeAny(w, v, props); err != nil { + if err := tm.writeAny(w, v, props); err != nil { return err } } diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go index bcd732c3..9db12e96 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -46,9 +46,13 @@ import ( "reflect" "strconv" "strings" + "time" "unicode/utf8" ) +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + type ParseError struct { Message string Line int // 1-based line number @@ -168,7 +172,7 @@ func (p *textParser) advance() { p.cur.offset, p.cur.line = p.offset, p.line p.cur.unquoted = "" switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',': + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': // Single symbol p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] case '"', '\'': @@ -456,7 +460,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. 
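The TextMarshaler hunks above turn the package-level text-format writers into methods, so behavior such as Compact and the new ExpandAny can be chosen per instance. A minimal usage sketch in Go; the Example type below is a hand-written stand-in for generated code and is not part of this change:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// Example is a hypothetical message; real code would use protoc-gen-gogo output.
type Example struct {
	Name  string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Count int32  `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	m := &Example{Name: "demo", Count: 3}

	// Multi-line text form (zero-value marshaler).
	fmt.Print((&proto.TextMarshaler{}).Text(m))

	// One-line form; ExpandAny additionally inlines google.protobuf.Any
	// payloads whose concrete types have been registered.
	tm := &proto.TextMarshaler{Compact: true, ExpandAny: true}
	fmt.Println(tm.Text(m))
}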
A name may also be - // "[extension]". + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > for { tok := p.next() if tok.err != nil { @@ -466,33 +473,74 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { break } if tok.value == "[" { - // Looks like an extension. + // Looks like an extension or an Any. // // TODO: Check whether we need to handle // namespace rooted names (e.g. ".something.Foo"). - tok = p.next() - if tok.err != nil { - return tok.err + extName, err := p.consumeExtName() + if err != nil { + return err } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + var desc *ExtensionDesc // This could be faster, but it's functional. // TODO: Do something smarter than a linear scan. for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == tok.value { + if d.Name == extName { desc = d break } } if desc == nil { - return p.errorf("unrecognized extension %q", tok.value) - } - // Check the extension terminator. - tok = p.next() - if tok.err != nil { - return tok.err - } - if tok.value != "]" { - return p.errorf("unrecognized extension terminator %q", tok.value) + return p.errorf("unrecognized extension %q", extName) } props := &Properties{} @@ -550,7 +598,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { props = oop.Prop nv := reflect.New(oop.Type.Elem()) dst = nv.Elem().Field(0) - sv.Field(oop.Field).Set(nv) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) } if !dst.IsValid() { return p.errorf("unknown field name %q in %v", name, st) @@ -657,6 +709,35 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { return reqFieldErr } +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. 
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + // consumeOptionalSeparator consumes an optional semicolon or comma. // It is used in readStruct to provide backward compatibility. func (p *textParser) consumeOptionalSeparator() error { @@ -717,6 +798,80 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { } return nil } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := &timestamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } switch fv := v; fv.Kind() { case reflect.Slice: at := v.Type() @@ -759,12 +914,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) return p.readAny(fv.Index(fv.Len()-1), props) case reflect.Bool: - // Either "true", "false", 1 or 0. + // true/1/t/True or false/f/0/False. switch tok.value { - case "true", "1": + case "true", "1", "t", "True": fv.SetBool(true) return nil - case "false", "0": + case "false", "0", "f", "False": fv.SetBool(false) return nil } diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 00000000..9324f654 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. 
+func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because it corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func timestampProto(t time.Time) (*timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go new file mode 100644 index 00000000..d4276474 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -0,0 +1,229 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
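timestampFromProto and timestampProto above funnel every conversion through validateTimestamp's range check. The same seconds/nanos split can be sketched standalone in Go; the bounds are copied from timestamp.go, while the helper name is illustrative only (the library keeps its versions unexported):

package main

import (
	"fmt"
	"time"
)

// Range mirrored from the vendored timestamp.go:
// [0001-01-01T00:00:00Z, 10000-01-01T00:00:00Z).
const (
	minValidSeconds = -62135596800
	maxValidSeconds = 253402300800
)

// toTimestampFields splits a time.Time into the seconds/nanos pair a
// google.protobuf.Timestamp carries and rejects out-of-range values.
func toTimestampFields(t time.Time) (int64, int32, error) {
	secs := t.Unix()
	nanos := int32(t.Sub(time.Unix(secs, 0)))
	if secs < minValidSeconds || secs >= maxValidSeconds {
		return 0, 0, fmt.Errorf("timestamp %v out of range", t)
	}
	if nanos < 0 || nanos >= 1e9 {
		return 0, 0, fmt.Errorf("timestamp %v: nanos not in [0, 1e9)", t)
	}
	return secs, nanos, nil
}

func main() {
	s, n, err := toTimestampFields(time.Date(2017, 9, 1, 12, 0, 0, 42, time.UTC))
	fmt.Println(s, n, err) // 1504267200 42 <nil>
}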
+ +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} + +func (o *Buffer) decTimestamp() (time.Time, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return time.Time{}, err + } + tproto := &timestamp{} + if err := Unmarshal(b, tproto); err != nil { + return time.Time{}, err + } + return timestampFromProto(tproto) +} + +func (o *Buffer) dec_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + setPtrCustomType(base, p.field, &t) + return nil +} + +func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + setCustomType(base, p.field, &t) + return nil +} + +func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))) + var zero field + setPtrCustomType(newBas, zero, &t) + return nil +} + +func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType)) + var zero field + setCustomType(newBas, zero, &t) + return nil +} + +func size_time(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + tim := structPointer_Interface(structp, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_time(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + tim := structPointer_Interface(structp, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_time(p *Properties, base structPointer) (n int) { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return 0 + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return errRepeatedHasNil + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto new file mode 100644 index 00000000..7eaf2291 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto @@ -0,0 +1,139 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto index acaee1f4..6e4da2c1 100644 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto @@ -53,6 +53,16 @@ option go_package = "plugin_go"; import "google/protobuf/descriptor.proto"; +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + // An encoded CodeGeneratorRequest is written to the plugin's stdin. message CodeGeneratorRequest { // The .proto files that were explicitly listed on the command-line. The @@ -75,6 +85,9 @@ message CodeGeneratorRequest { // is not similarly optimized on protoc's end -- it will store all fields in // memory at once before sending them to the plugin. repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; } // The plugin writes an encoded CodeGeneratorResponse to stdout. diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto index c59a6022..2cc496b5 100644 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto @@ -139,7 +139,11 @@ message FieldDescriptorProto { TYPE_FIXED32 = 7; TYPE_BOOL = 8; TYPE_STRING = 9; - TYPE_GROUP = 10; // Tag-delimited aggregate. + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; TYPE_MESSAGE = 11; // Length-delimited aggregate. // New in version 2. @@ -157,7 +161,6 @@ message FieldDescriptorProto { LABEL_OPTIONAL = 1; LABEL_REQUIRED = 2; LABEL_REPEATED = 3; - // TODO(sanjay): Should we add LABEL_MAP? }; optional string name = 1; @@ -202,6 +205,7 @@ message FieldDescriptorProto { // Describes a oneof. message OneofDescriptorProto { optional string name = 1; + optional OneofOptions options = 2; } // Describes an enum type. @@ -304,19 +308,8 @@ message FileOptions { // top-level extensions defined in the file. optional bool java_multiple_files = 10 [default=false]; - // If set true, then the Java code generator will generate equals() and - // hashCode() methods for all messages defined in the .proto file. - // This increases generated code size, potentially substantially for large - // protos, which may harm a memory-constrained application. 
- // - In the full runtime this is a speed optimization, as the - // AbstractMessage base class includes reflection-based implementations of - // these methods. - // - In the lite runtime, setting this option changes the semantics of - // equals() and hashCode() to more closely match those of the full runtime; - // the generated methods compute their results based on field values rather - // than object identity. (Implementations should not assume that hashcodes - // will be consistent across runtimes or versions of the protocol compiler.) - optional bool java_generate_equals_and_hash = 20 [default=false]; + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 @@ -377,15 +370,19 @@ message FileOptions { // Namespace for generated classes; defaults to the package. optional string csharp_namespace = 37; - // Whether the nano proto compiler should generate in the deprecated non-nano - // suffixed package. - optional bool javanano_use_deprecated_package = 38; + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this option is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; + + //reserved 38; } message MessageOptions { @@ -443,6 +440,9 @@ message MessageOptions { // parser. optional bool map_entry = 7; + //reserved 8; // javalite_serializable + + + // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -471,7 +471,6 @@ message FieldOptions { // false will avoid using packed encoding. optional bool packed = 2; - // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types // (int64, uint64, sint64, fixed64, sfixed64). By default these types are @@ -512,7 +511,7 @@ message FieldOptions { // // // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outher message + // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. // This is necessary because otherwise the inner message would have to be // parsed in order to perform the check, defeating the purpose of lazy @@ -533,6 +532,16 @@ message FieldOptions { optional bool weak = 10 [default=false]; + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + //reserved 4; // removed jtype +} + +message OneofOptions { // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -552,6 +561,7 @@ message EnumOptions { // is a formalization for deprecating enums. optional bool deprecated = 3 [default=false]; + // The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999; @@ -606,6 +616,17 @@ message MethodOptions { // this is a formalization for deprecating methods. optional bool deprecated = 33 [default=false]; + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -777,3 +798,29 @@ message SourceCodeInfo { repeated string leading_detached_comments = 6; } } + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto new file mode 100644 index 00000000..318922b7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto @@ -0,0 +1,104 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto new file mode 100644 index 00000000..6057c852 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved.
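The sign-normalization rule in the Duration comment above maps directly onto Go's truncating integer division, which already yields seconds and nanos of the same sign. A small illustrative sketch (the helper name is not from the library):

package main

import (
	"fmt"
	"time"
)

// splitDuration breaks a time.Duration into the seconds/nanos pair carried
// by google.protobuf.Duration. Division and remainder truncate toward zero,
// so both parts come out with the same sign, satisfying the invariant above.
func splitDuration(d time.Duration) (seconds int64, nanos int32) {
	return int64(d / time.Second), int32(d % time.Second)
}

func main() {
	fmt.Println(splitDuration(90 * time.Second))         // 90 0
	fmt.Println(splitDuration(-1500 * time.Millisecond)) // -1 -500000000
}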
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto new file mode 100644 index 00000000..994af79f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto @@ -0,0 +1,246 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "FieldMaskProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. 
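The JSON encoding described further below in this file (paths joined by commas, with snake_case segments rewritten as lowerCamelCase) can be sketched in Go as follows; the helper is illustrative only, not the library's implementation:

package main

import (
	"fmt"
	"strings"
)

// jsonMask renders a set of FieldMask paths in their JSON string form.
func jsonMask(paths []string) string {
	out := make([]string, len(paths))
	for i, p := range paths {
		parts := strings.Split(p, "_")
		// Capitalize the first letter of every segment after an underscore.
		for j := 1; j < len(parts); j++ {
			if parts[j] != "" {
				parts[j] = strings.ToUpper(parts[j][:1]) + parts[j][1:]
			}
		}
		out[i] = strings.Join(parts, "")
	}
	return strings.Join(out, ",")
}

func main() {
	fmt.Println(jsonMask([]string{"user.display_name", "photo"}))
	// Output: user.displayName,photo
}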
+// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a `paths` +// string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Field names in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. message FieldMask { + // The set of field mask paths.
+ repeated string paths = 1; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto new file mode 100644 index 00000000..4f78641f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map<string, Value> fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of these +// variants; absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value.
+ Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto new file mode 100644 index 00000000..c544c83e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto @@ -0,0 +1,108 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. 
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto new file mode 100644 index 00000000..c5632e5c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 00000000..a85bf198 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? 
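+// For example, TYPE_INT32 and TYPE_DOUBLE are scalar numeric types, while
+// TYPE_STRING, TYPE_BYTES, TYPE_MESSAGE and TYPE_GROUP are not.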
+func (field *FieldDescriptorProto) IsScalar() bool {
+	if field.Type == nil {
+		return false
+	}
+	switch *field.Type {
+	case FieldDescriptorProto_TYPE_DOUBLE,
+		FieldDescriptorProto_TYPE_FLOAT,
+		FieldDescriptorProto_TYPE_INT64,
+		FieldDescriptorProto_TYPE_UINT64,
+		FieldDescriptorProto_TYPE_INT32,
+		FieldDescriptorProto_TYPE_FIXED64,
+		FieldDescriptorProto_TYPE_FIXED32,
+		FieldDescriptorProto_TYPE_BOOL,
+		FieldDescriptorProto_TYPE_UINT32,
+		FieldDescriptorProto_TYPE_ENUM,
+		FieldDescriptorProto_TYPE_SFIXED32,
+		FieldDescriptorProto_TYPE_SFIXED64,
+		FieldDescriptorProto_TYPE_SINT32,
+		FieldDescriptorProto_TYPE_SINT64:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
index 341b59c5..e2703901 100644
--- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
@@ -21,12 +21,14 @@
 	It has these top-level messages:
 	FileOptions
 	MessageOptions
 	FieldOptions
+	OneofOptions
 	EnumOptions
 	EnumValueOptions
 	ServiceOptions
 	MethodOptions
 	UninterpretedOption
 	SourceCodeInfo
+	GeneratedCodeInfo
 */
 package descriptor
@@ -63,6 +65,10 @@ const (
 	FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
 	FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
 	FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
+	// Tag-delimited aggregate.
+	// Group type is deprecated and not supported in proto3. However, Proto3
+	// implementations should still be able to parse the group wire format and
+	// treat group fields as unknown fields.
 	FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
 	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
 	// New in version 2.
@@ -299,6 +305,48 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptorDescriptor, []int{11, 1}
 }
+// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+// or neither? HTTP-based RPC implementations may choose the GET verb for safe
+// methods, and the PUT verb for idempotent methods instead of the default POST.
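+//
+// For example, a hypothetical read-only method could be declared safe in its
+// .proto file as:
+//
+//	rpc GetFeature(GetFeatureRequest) returns (Feature) {
+//	  option idempotency_level = NO_SIDE_EFFECTS;
+//	}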
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{16, 0} +} + // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { @@ -696,8 +744,9 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } @@ -712,6 +761,13 @@ func (m *OneofDescriptorProto) GetName() string { return "" } +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + // Describes an enum type. type EnumDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -900,19 +956,8 @@ type FileOptions struct { // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // If set true, then the Java code generator will generate equals() and - // hashCode() methods for all messages defined in the .proto file. - // This increases generated code size, potentially substantially for large - // protos, which may harm a memory-constrained application. - // - In the full runtime this is a speed optimization, as the - // AbstractMessage base class includes reflection-based implementations of - // these methods. - // - In the lite runtime, setting this option changes the semantics of - // equals() and hashCode() to more closely match those of the full runtime; - // the generated methods compute their results based on field values rather - // than object identity. (Implementations should not assume that hashcodes - // will be consistent across runtimes or versions of the protocol compiler.) - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"` + // This option does nothing. 
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"`
 	// If set true, then the Java2 code generator will generate code that
 	// throws an exception whenever an attempt is made to assign a non-UTF-8
 	// byte sequence to a string field.
@@ -953,9 +998,11 @@ type FileOptions struct {
 	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
 	// Namespace for generated classes; defaults to the package.
 	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
-	// Whether the nano proto compiler should generate in the deprecated non-nano
-	// suffixed package.
-	JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package,json=javananoUseDeprecatedPackage" json:"javanano_use_deprecated_package,omitempty"`
+	// By default Swift generators will take the proto package and CamelCase it
+	// replacing '.' with underscore and use that to prefix the types/symbols
+	// defined. When this option is provided, they will use this value instead
+	// to prefix the types/symbols defined.
+	SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
 	proto.XXX_InternalExtensions `json:"-"`
@@ -976,7 +1023,6 @@ func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
 }
 const Default_FileOptions_JavaMultipleFiles bool = false
-const Default_FileOptions_JavaGenerateEqualsAndHash bool = false
 const Default_FileOptions_JavaStringCheckUtf8 bool = false
 const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
 const Default_FileOptions_CcGenericServices bool = false
@@ -1010,7 +1056,7 @@ func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
 	if m != nil && m.JavaGenerateEqualsAndHash != nil {
 		return *m.JavaGenerateEqualsAndHash
 	}
-	return Default_FileOptions_JavaGenerateEqualsAndHash
+	return false
 }
 func (m *FileOptions) GetJavaStringCheckUtf8() bool {
@@ -1083,11 +1129,11 @@ func (m *FileOptions) GetCsharpNamespace() string {
 	return ""
 }
-func (m *FileOptions) GetJavananoUseDeprecatedPackage() bool {
-	if m != nil && m.JavananoUseDeprecatedPackage != nil {
-		return *m.JavananoUseDeprecatedPackage
+func (m *FileOptions) GetSwiftPrefix() string {
+	if m != nil && m.SwiftPrefix != nil {
+		return *m.SwiftPrefix
 	}
-	return false
+	return ""
 }
 func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
@@ -1247,7 +1293,7 @@ type FieldOptions struct {
 	//
 	//
 	// Note that implementations may choose not to check required fields within
-	// a lazy sub-message. That is, calling IsInitialized() on the outher message
+	// a lazy sub-message. That is, calling IsInitialized() on the outer message
 	// may return true even if the inner message has missing required fields.
 	// This is necessary because otherwise the inner message would have to be
 	// parsed in order to perform the check, defeating the purpose of lazy
@@ -1338,6 +1384,33 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
 	return nil
 }
+type OneofOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + type EnumOptions struct { // Set this option to true to allow mapping different tag names to the same // value. @@ -1356,7 +1429,7 @@ type EnumOptions struct { func (m *EnumOptions) Reset() { *m = EnumOptions{} } func (m *EnumOptions) String() string { return proto.CompactTextString(m) } func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } var extRange_EnumOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1404,7 +1477,7 @@ type EnumValueOptions struct { func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } var extRange_EnumValueOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1445,7 +1518,7 @@ type ServiceOptions struct { func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } var extRange_ServiceOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1476,7 +1549,8 @@ type MethodOptions struct { // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` proto.XXX_InternalExtensions `json:"-"` @@ -1486,7 +1560,7 @@ type MethodOptions struct { func (m *MethodOptions) Reset() { *m = MethodOptions{} } func (m *MethodOptions) String() string { return proto.CompactTextString(m) } func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } var extRange_MethodOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1497,6 +1571,7 @@ func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { } const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN func (m *MethodOptions) GetDeprecated() bool { if m != nil && m.Deprecated != nil { @@ -1505,6 +1580,13 @@ func (m *MethodOptions) GetDeprecated() bool { return Default_MethodOptions_Deprecated } +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { return m.UninterpretedOption @@ -1534,7 +1616,7 @@ type UninterpretedOption struct { func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1600,7 +1682,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{16, 0} + return fileDescriptorDescriptor, []int{17, 0} } func (m *UninterpretedOption_NamePart) GetNamePart() string { @@ -1670,7 +1752,7 @@ type SourceCodeInfo struct { func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{18} } func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1767,7 +1849,7 @@ func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{17, 0} + return fileDescriptorDescriptor, []int{18, 0} } func (m *SourceCodeInfo_Location) GetPath() []int32 { @@ -1805,6 +1887,79 @@ func (m *SourceCodeInfo_Location) 
GetLeadingDetachedComments() []string { return nil } +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
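+	// For instance, begin=6 and end=10 cover the four generated bytes at
+	// offsets 6, 7, 8 and 9.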
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{19, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + func init() { proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") @@ -1820,6 +1975,7 @@ func init() { proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") @@ -1828,154 +1984,167 @@ func init() { proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) } var fileDescriptorDescriptor = []byte{ - // 2211 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xc6, - 0x15, 0x0f, 0xf8, 0x4f, 0xe4, 0x23, 0x45, 0xad, 0x56, 0x8a, 0x03, 0xcb, 0x76, 0x2c, 0x33, 0x76, - 0x2c, 0xdb, 0xad, 0x9c, 0x91, 0xff, 0x44, 0x51, 0x3a, 0xe9, 0x50, 0x24, 0xac, 0xd0, 0x43, 0x89, - 0x2c, 0x28, 0xb6, 0x4e, 0x2e, 0x98, 0x15, 0xb0, 0xa4, 0x60, 0x83, 0x0b, 0x14, 0x00, 0x6d, 
0x2b, - 0x27, 0xcf, 0xf4, 0xd4, 0x6f, 0xd0, 0x69, 0x3b, 0x3d, 0xe4, 0x92, 0x99, 0x7e, 0x80, 0x1e, 0x7a, - 0xef, 0xb5, 0x87, 0x9e, 0x7b, 0xec, 0x4c, 0xfb, 0x0d, 0x7a, 0xed, 0xec, 0x2e, 0x00, 0x82, 0x7f, - 0x14, 0xab, 0x99, 0x49, 0xd3, 0x93, 0xb4, 0xbf, 0xf7, 0x7b, 0x8f, 0x6f, 0xdf, 0xfe, 0xb0, 0xef, - 0x01, 0x80, 0x2c, 0x1a, 0x98, 0xbe, 0xed, 0x85, 0xae, 0xbf, 0xed, 0xf9, 0x6e, 0xe8, 0xe2, 0x95, - 0xa1, 0xeb, 0x0e, 0x1d, 0x2a, 0x57, 0x27, 0xe3, 0x41, 0xed, 0x10, 0x56, 0x9f, 0xd8, 0x0e, 0x6d, - 0x26, 0xc4, 0x1e, 0x0d, 0xf1, 0x2e, 0xe4, 0x06, 0xb6, 0x43, 0x55, 0x65, 0x33, 0xbb, 0x55, 0xde, - 0xb9, 0xb9, 0x3d, 0xe3, 0xb4, 0x3d, 0xed, 0xd1, 0xe5, 0xb0, 0x2e, 0x3c, 0x6a, 0xff, 0xc8, 0xc1, - 0xda, 0x02, 0x2b, 0xc6, 0x90, 0x63, 0x64, 0xc4, 0x23, 0x2a, 0x5b, 0x25, 0x5d, 0xfc, 0x8f, 0x55, - 0x58, 0xf2, 0x88, 0xf9, 0x82, 0x0c, 0xa9, 0x9a, 0x11, 0x70, 0xbc, 0xc4, 0xef, 0x03, 0x58, 0xd4, - 0xa3, 0xcc, 0xa2, 0xcc, 0x3c, 0x53, 0xb3, 0x9b, 0xd9, 0xad, 0x92, 0x9e, 0x42, 0xf0, 0x3d, 0x58, - 0xf5, 0xc6, 0x27, 0x8e, 0x6d, 0x1a, 0x29, 0x1a, 0x6c, 0x66, 0xb7, 0xf2, 0x3a, 0x92, 0x86, 0xe6, - 0x84, 0x7c, 0x1b, 0x56, 0x5e, 0x51, 0xf2, 0x22, 0x4d, 0x2d, 0x0b, 0x6a, 0x95, 0xc3, 0x29, 0x62, - 0x03, 0x2a, 0x23, 0x1a, 0x04, 0x64, 0x48, 0x8d, 0xf0, 0xcc, 0xa3, 0x6a, 0x4e, 0xec, 0x7e, 0x73, - 0x6e, 0xf7, 0xb3, 0x3b, 0x2f, 0x47, 0x5e, 0xc7, 0x67, 0x1e, 0xc5, 0x75, 0x28, 0x51, 0x36, 0x1e, - 0xc9, 0x08, 0xf9, 0x73, 0xea, 0xa7, 0xb1, 0xf1, 0x68, 0x36, 0x4a, 0x91, 0xbb, 0x45, 0x21, 0x96, - 0x02, 0xea, 0xbf, 0xb4, 0x4d, 0xaa, 0x16, 0x44, 0x80, 0xdb, 0x73, 0x01, 0x7a, 0xd2, 0x3e, 0x1b, - 0x23, 0xf6, 0xc3, 0x0d, 0x28, 0xd1, 0xd7, 0x21, 0x65, 0x81, 0xed, 0x32, 0x75, 0x49, 0x04, 0xb9, - 0xb5, 0xe0, 0x14, 0xa9, 0x63, 0xcd, 0x86, 0x98, 0xf8, 0xe1, 0xc7, 0xb0, 0xe4, 0x7a, 0xa1, 0xed, - 0xb2, 0x40, 0x2d, 0x6e, 0x2a, 0x5b, 0xe5, 0x9d, 0xab, 0x0b, 0x85, 0xd0, 0x91, 0x1c, 0x3d, 0x26, - 0xe3, 0x16, 0xa0, 0xc0, 0x1d, 0xfb, 0x26, 0x35, 0x4c, 0xd7, 0xa2, 0x86, 0xcd, 0x06, 0xae, 0x5a, - 0x12, 0x01, 0xae, 0xcf, 0x6f, 0x44, 0x10, 0x1b, 0xae, 0x45, 0x5b, 0x6c, 0xe0, 0xea, 0xd5, 0x60, - 0x6a, 0x8d, 0x2f, 0x41, 0x21, 0x38, 0x63, 0x21, 0x79, 0xad, 0x56, 0x84, 0x42, 0xa2, 0x55, 0xed, - 0xdf, 0x79, 0x58, 0xb9, 0x88, 0xc4, 0x3e, 0x85, 0xfc, 0x80, 0xef, 0x52, 0xcd, 0xfc, 0x37, 0x35, - 0x90, 0x3e, 0xd3, 0x45, 0x2c, 0x7c, 0xc7, 0x22, 0xd6, 0xa1, 0xcc, 0x68, 0x10, 0x52, 0x4b, 0x2a, - 0x22, 0x7b, 0x41, 0x4d, 0x81, 0x74, 0x9a, 0x97, 0x54, 0xee, 0x3b, 0x49, 0xea, 0x19, 0xac, 0x24, - 0x29, 0x19, 0x3e, 0x61, 0xc3, 0x58, 0x9b, 0xf7, 0xdf, 0x96, 0xc9, 0xb6, 0x16, 0xfb, 0xe9, 0xdc, - 0x4d, 0xaf, 0xd2, 0xa9, 0x35, 0x6e, 0x02, 0xb8, 0x8c, 0xba, 0x03, 0xc3, 0xa2, 0xa6, 0xa3, 0x16, - 0xcf, 0xa9, 0x52, 0x87, 0x53, 0xe6, 0xaa, 0xe4, 0x4a, 0xd4, 0x74, 0xf0, 0x27, 0x13, 0xa9, 0x2d, - 0x9d, 0xa3, 0x94, 0x43, 0xf9, 0x90, 0xcd, 0xa9, 0xad, 0x0f, 0x55, 0x9f, 0x72, 0xdd, 0x53, 0x2b, - 0xda, 0x59, 0x49, 0x24, 0xb1, 0xfd, 0xd6, 0x9d, 0xe9, 0x91, 0x9b, 0xdc, 0xd8, 0xb2, 0x9f, 0x5e, - 0xe2, 0x0f, 0x20, 0x01, 0x0c, 0x21, 0x2b, 0x10, 0xb7, 0x50, 0x25, 0x06, 0x8f, 0xc8, 0x88, 0x6e, - 0xec, 0x42, 0x75, 0xba, 0x3c, 0x78, 0x1d, 0xf2, 0x41, 0x48, 0xfc, 0x50, 0xa8, 0x30, 0xaf, 0xcb, - 0x05, 0x46, 0x90, 0xa5, 0xcc, 0x12, 0xb7, 0x5c, 0x5e, 0xe7, 0xff, 0x6e, 0x7c, 0x0c, 0xcb, 0x53, - 0x3f, 0x7f, 0x51, 0xc7, 0xda, 0x6f, 0x0a, 0xb0, 0xbe, 0x48, 0x73, 0x0b, 0xe5, 0x7f, 0x09, 0x0a, - 0x6c, 0x3c, 0x3a, 0xa1, 0xbe, 0x9a, 0x15, 0x11, 0xa2, 0x15, 0xae, 0x43, 0xde, 0x21, 0x27, 0xd4, - 0x51, 0x73, 0x9b, 0xca, 0x56, 0x75, 0xe7, 0xde, 0x85, 0x54, 0xbd, 0xdd, 0xe6, 0x2e, 0xba, 0xf4, - 0xc4, 0x9f, 0x41, 
0x2e, 0xba, 0xe2, 0x78, 0x84, 0xbb, 0x17, 0x8b, 0xc0, 0xb5, 0xa8, 0x0b, 0x3f, - 0x7c, 0x05, 0x4a, 0xfc, 0xaf, 0xac, 0x6d, 0x41, 0xe4, 0x5c, 0xe4, 0x00, 0xaf, 0x2b, 0xde, 0x80, - 0xa2, 0x90, 0x99, 0x45, 0xe3, 0xd6, 0x90, 0xac, 0xf9, 0xc1, 0x58, 0x74, 0x40, 0xc6, 0x4e, 0x68, - 0xbc, 0x24, 0xce, 0x98, 0x0a, 0xc1, 0x94, 0xf4, 0x4a, 0x04, 0xfe, 0x9c, 0x63, 0xf8, 0x3a, 0x94, - 0xa5, 0x2a, 0x6d, 0x66, 0xd1, 0xd7, 0xe2, 0xf6, 0xc9, 0xeb, 0x52, 0xa8, 0x2d, 0x8e, 0xf0, 0x9f, - 0x7f, 0x1e, 0xb8, 0x2c, 0x3e, 0x5a, 0xf1, 0x13, 0x1c, 0x10, 0x3f, 0xff, 0xf1, 0xec, 0xc5, 0x77, - 0x6d, 0xf1, 0xf6, 0x66, 0xb5, 0x58, 0xfb, 0x53, 0x06, 0x72, 0xe2, 0x79, 0x5b, 0x81, 0xf2, 0xf1, - 0x17, 0x5d, 0xcd, 0x68, 0x76, 0xfa, 0xfb, 0x6d, 0x0d, 0x29, 0xb8, 0x0a, 0x20, 0x80, 0x27, 0xed, - 0x4e, 0xfd, 0x18, 0x65, 0x92, 0x75, 0xeb, 0xe8, 0xf8, 0xf1, 0x43, 0x94, 0x4d, 0x1c, 0xfa, 0x12, - 0xc8, 0xa5, 0x09, 0x0f, 0x76, 0x50, 0x1e, 0x23, 0xa8, 0xc8, 0x00, 0xad, 0x67, 0x5a, 0xf3, 0xf1, - 0x43, 0x54, 0x98, 0x46, 0x1e, 0xec, 0xa0, 0x25, 0xbc, 0x0c, 0x25, 0x81, 0xec, 0x77, 0x3a, 0x6d, - 0x54, 0x4c, 0x62, 0xf6, 0x8e, 0xf5, 0xd6, 0xd1, 0x01, 0x2a, 0x25, 0x31, 0x0f, 0xf4, 0x4e, 0xbf, - 0x8b, 0x20, 0x89, 0x70, 0xa8, 0xf5, 0x7a, 0xf5, 0x03, 0x0d, 0x95, 0x13, 0xc6, 0xfe, 0x17, 0xc7, - 0x5a, 0x0f, 0x55, 0xa6, 0xd2, 0x7a, 0xb0, 0x83, 0x96, 0x93, 0x9f, 0xd0, 0x8e, 0xfa, 0x87, 0xa8, - 0x8a, 0x57, 0x61, 0x59, 0xfe, 0x44, 0x9c, 0xc4, 0xca, 0x0c, 0xf4, 0xf8, 0x21, 0x42, 0x93, 0x44, - 0x64, 0x94, 0xd5, 0x29, 0xe0, 0xf1, 0x43, 0x84, 0x6b, 0x0d, 0xc8, 0x0b, 0x75, 0x61, 0x0c, 0xd5, - 0x76, 0x7d, 0x5f, 0x6b, 0x1b, 0x9d, 0xee, 0x71, 0xab, 0x73, 0x54, 0x6f, 0x23, 0x65, 0x82, 0xe9, - 0xda, 0xcf, 0xfa, 0x2d, 0x5d, 0x6b, 0xa2, 0x4c, 0x1a, 0xeb, 0x6a, 0xf5, 0x63, 0xad, 0x89, 0xb2, - 0xb5, 0xbb, 0xb0, 0xbe, 0xe8, 0x9e, 0x59, 0xf4, 0x64, 0xd4, 0xbe, 0x56, 0x60, 0x6d, 0xc1, 0x95, - 0xb9, 0xf0, 0x29, 0xfa, 0x29, 0xe4, 0xa5, 0xd2, 0x64, 0x13, 0xb9, 0xb3, 0xf0, 0xee, 0x15, 0xba, - 0x9b, 0x6b, 0x24, 0xc2, 0x2f, 0xdd, 0x48, 0xb3, 0xe7, 0x34, 0x52, 0x1e, 0x62, 0x4e, 0x4e, 0xbf, - 0x52, 0x40, 0x3d, 0x2f, 0xf6, 0x5b, 0x9e, 0xf7, 0xcc, 0xd4, 0xf3, 0xfe, 0xe9, 0x6c, 0x02, 0x37, - 0xce, 0xdf, 0xc3, 0x5c, 0x16, 0xdf, 0x28, 0x70, 0x69, 0xf1, 0xbc, 0xb1, 0x30, 0x87, 0xcf, 0xa0, - 0x30, 0xa2, 0xe1, 0xa9, 0x1b, 0xf7, 0xdc, 0x0f, 0x17, 0xdc, 0xe4, 0xdc, 0x3c, 0x5b, 0xab, 0xc8, - 0x2b, 0xdd, 0x0a, 0xb2, 0xe7, 0x0d, 0x0d, 0x32, 0x9b, 0xb9, 0x4c, 0x7f, 0x9d, 0x81, 0x77, 0x17, - 0x06, 0x5f, 0x98, 0xe8, 0x35, 0x00, 0x9b, 0x79, 0xe3, 0x50, 0xf6, 0x55, 0x79, 0xcd, 0x94, 0x04, - 0x22, 0x1e, 0x61, 0x7e, 0x85, 0x8c, 0xc3, 0xc4, 0x9e, 0x15, 0x76, 0x90, 0x90, 0x20, 0xec, 0x4e, - 0x12, 0xcd, 0x89, 0x44, 0xdf, 0x3f, 0x67, 0xa7, 0x73, 0x2d, 0xeb, 0x23, 0x40, 0xa6, 0x63, 0x53, - 0x16, 0x1a, 0x41, 0xe8, 0x53, 0x32, 0xb2, 0xd9, 0x50, 0xdc, 0xa3, 0xc5, 0xbd, 0xfc, 0x80, 0x38, - 0x01, 0xd5, 0x57, 0xa4, 0xb9, 0x17, 0x5b, 0xb9, 0x87, 0x68, 0x16, 0x7e, 0xca, 0xa3, 0x30, 0xe5, - 0x21, 0xcd, 0x89, 0x47, 0xed, 0x6f, 0x4b, 0x50, 0x4e, 0x4d, 0x67, 0xf8, 0x06, 0x54, 0x9e, 0x93, - 0x97, 0xc4, 0x88, 0x27, 0x6e, 0x59, 0x89, 0x32, 0xc7, 0xba, 0xd1, 0xd4, 0xfd, 0x11, 0xac, 0x0b, - 0x8a, 0x3b, 0x0e, 0xa9, 0x6f, 0x98, 0x0e, 0x09, 0x02, 0x51, 0xb4, 0xa2, 0xa0, 0x62, 0x6e, 0xeb, - 0x70, 0x53, 0x23, 0xb6, 0xe0, 0x47, 0xb0, 0x26, 0x3c, 0x46, 0x63, 0x27, 0xb4, 0x3d, 0x87, 0x1a, - 0xfc, 0x1d, 0x20, 0x10, 0xf7, 0x69, 0x92, 0xd9, 0x2a, 0x67, 0x1c, 0x46, 0x04, 0x9e, 0x51, 0x80, - 0x0f, 0xe0, 0x9a, 0x70, 0x1b, 0x52, 0x46, 0x7d, 0x12, 0x52, 0x83, 0xfe, 0x72, 0x4c, 0x9c, 0xc0, - 0x20, 0xcc, 0x32, 0x4e, 0x49, 0x70, 0xaa, 
0xae, 0xa7, 0x03, 0x5c, 0xe6, 0xdc, 0x83, 0x88, 0xaa, - 0x09, 0x66, 0x9d, 0x59, 0x9f, 0x93, 0xe0, 0x14, 0xef, 0xc1, 0x25, 0x11, 0x28, 0x08, 0x7d, 0x9b, - 0x0d, 0x0d, 0xf3, 0x94, 0x9a, 0x2f, 0x8c, 0x71, 0x38, 0xd8, 0x55, 0xaf, 0xa4, 0x23, 0x88, 0x24, - 0x7b, 0x82, 0xd3, 0xe0, 0x94, 0x7e, 0x38, 0xd8, 0xc5, 0x3d, 0xa8, 0xf0, 0xf3, 0x18, 0xd9, 0x5f, - 0x51, 0x63, 0xe0, 0xfa, 0xa2, 0x47, 0x54, 0x17, 0x3c, 0xdc, 0xa9, 0x22, 0x6e, 0x77, 0x22, 0x87, - 0x43, 0xd7, 0xa2, 0x7b, 0xf9, 0x5e, 0x57, 0xd3, 0x9a, 0x7a, 0x39, 0x8e, 0xf2, 0xc4, 0xf5, 0xb9, - 0xa6, 0x86, 0x6e, 0x52, 0xe3, 0xb2, 0xd4, 0xd4, 0xd0, 0x8d, 0x2b, 0xfc, 0x08, 0xd6, 0x4c, 0x53, - 0x6e, 0xdb, 0x36, 0x8d, 0x68, 0x58, 0x0f, 0x54, 0x34, 0x55, 0x2f, 0xd3, 0x3c, 0x90, 0x84, 0x48, - 0xe6, 0x01, 0xfe, 0x04, 0xde, 0x9d, 0xd4, 0x2b, 0xed, 0xb8, 0x3a, 0xb7, 0xcb, 0x59, 0xd7, 0x47, - 0xb0, 0xe6, 0x9d, 0xcd, 0x3b, 0xe2, 0xa9, 0x5f, 0xf4, 0xce, 0x66, 0xdd, 0x6e, 0x89, 0x17, 0x30, - 0x9f, 0x9a, 0x24, 0xa4, 0x96, 0xfa, 0x5e, 0x9a, 0x9d, 0x32, 0xe0, 0xfb, 0x80, 0x4c, 0xd3, 0xa0, - 0x8c, 0x9c, 0x38, 0xd4, 0x20, 0x3e, 0x65, 0x24, 0x50, 0xaf, 0xa7, 0xc9, 0x55, 0xd3, 0xd4, 0x84, - 0xb5, 0x2e, 0x8c, 0xf8, 0x2e, 0xac, 0xba, 0x27, 0xcf, 0x4d, 0x29, 0x2e, 0xc3, 0xf3, 0xe9, 0xc0, - 0x7e, 0xad, 0xde, 0x14, 0x65, 0x5a, 0xe1, 0x06, 0x21, 0xad, 0xae, 0x80, 0xf1, 0x1d, 0x40, 0x66, - 0x70, 0x4a, 0x7c, 0x4f, 0x34, 0xe9, 0xc0, 0x23, 0x26, 0x55, 0x6f, 0x49, 0xaa, 0xc4, 0x8f, 0x62, - 0x18, 0x6b, 0x70, 0x9d, 0x6f, 0x9e, 0x11, 0xe6, 0x1a, 0xe3, 0x80, 0x1a, 0x93, 0x14, 0x93, 0xb3, - 0xf8, 0x90, 0xa7, 0xa5, 0x5f, 0x8d, 0x69, 0xfd, 0x80, 0x36, 0x13, 0x52, 0x7c, 0x3c, 0xcf, 0x60, - 0x7d, 0xcc, 0x6c, 0x16, 0x52, 0xdf, 0xf3, 0x29, 0x77, 0x96, 0x0f, 0xac, 0xfa, 0xcf, 0xa5, 0x73, - 0x86, 0xee, 0x7e, 0x9a, 0x2d, 0x45, 0xa2, 0xaf, 0x8d, 0xe7, 0xc1, 0xda, 0x1e, 0x54, 0xd2, 0xda, - 0xc1, 0x25, 0x90, 0xea, 0x41, 0x0a, 0xef, 0xa8, 0x8d, 0x4e, 0x93, 0xf7, 0xc2, 0x2f, 0x35, 0x94, - 0xe1, 0x3d, 0xb9, 0xdd, 0x3a, 0xd6, 0x0c, 0xbd, 0x7f, 0x74, 0xdc, 0x3a, 0xd4, 0x50, 0xf6, 0x6e, - 0xa9, 0xf8, 0xaf, 0x25, 0xf4, 0xe6, 0xcd, 0x9b, 0x37, 0x99, 0xda, 0x5f, 0x32, 0x50, 0x9d, 0x9e, - 0x83, 0xf1, 0x4f, 0xe0, 0xbd, 0xf8, 0xa5, 0x35, 0xa0, 0xa1, 0xf1, 0xca, 0xf6, 0x85, 0x9c, 0x47, - 0x44, 0x4e, 0x92, 0xc9, 0x49, 0xac, 0x47, 0xac, 0x1e, 0x0d, 0x7f, 0x61, 0xfb, 0x5c, 0xac, 0x23, - 0x12, 0xe2, 0x36, 0x5c, 0x67, 0xae, 0x11, 0x84, 0x84, 0x59, 0xc4, 0xb7, 0x8c, 0xc9, 0xe7, 0x02, - 0x83, 0x98, 0x26, 0x0d, 0x02, 0x57, 0x76, 0x92, 0x24, 0xca, 0x55, 0xe6, 0xf6, 0x22, 0xf2, 0xe4, - 0x8a, 0xad, 0x47, 0xd4, 0x19, 0xd5, 0x64, 0xcf, 0x53, 0xcd, 0x15, 0x28, 0x8d, 0x88, 0x67, 0x50, - 0x16, 0xfa, 0x67, 0x62, 0x7a, 0x2b, 0xea, 0xc5, 0x11, 0xf1, 0x34, 0xbe, 0xfe, 0xfe, 0xce, 0x20, - 0x5d, 0xc7, 0xbf, 0x67, 0xa1, 0x92, 0x9e, 0xe0, 0xf8, 0x40, 0x6c, 0x8a, 0x6b, 0x5e, 0x11, 0xb7, - 0xc0, 0x07, 0xdf, 0x3a, 0xef, 0x6d, 0x37, 0xf8, 0xfd, 0xbf, 0x57, 0x90, 0x73, 0x95, 0x2e, 0x3d, - 0x79, 0xef, 0xe5, 0x5a, 0xa3, 0x72, 0x5a, 0x2f, 0xea, 0xd1, 0x0a, 0x1f, 0x40, 0xe1, 0x79, 0x20, - 0x62, 0x17, 0x44, 0xec, 0x9b, 0xdf, 0x1e, 0xfb, 0x69, 0x4f, 0x04, 0x2f, 0x3d, 0xed, 0x19, 0x47, - 0x1d, 0xfd, 0xb0, 0xde, 0xd6, 0x23, 0x77, 0x7c, 0x19, 0x72, 0x0e, 0xf9, 0xea, 0x6c, 0xba, 0x53, - 0x08, 0xe8, 0xa2, 0x85, 0xbf, 0x0c, 0xb9, 0x57, 0x94, 0xbc, 0x98, 0xbe, 0x9f, 0x05, 0xf4, 0x3d, - 0x4a, 0xff, 0x3e, 0xe4, 0x45, 0xbd, 0x30, 0x40, 0x54, 0x31, 0xf4, 0x0e, 0x2e, 0x42, 0xae, 0xd1, - 0xd1, 0xb9, 0xfc, 0x11, 0x54, 0x24, 0x6a, 0x74, 0x5b, 0x5a, 0x43, 0x43, 0x99, 0xda, 0x23, 0x28, - 0xc8, 0x22, 0xf0, 0x47, 0x23, 0x29, 0x03, 0x7a, 0x27, 0x5a, 0x46, 
0x31, 0x94, 0xd8, 0xda, 0x3f, - 0xdc, 0xd7, 0x74, 0x94, 0x49, 0x1f, 0xef, 0x9f, 0x15, 0x28, 0xa7, 0x06, 0x2a, 0xde, 0xca, 0x89, - 0xe3, 0xb8, 0xaf, 0x0c, 0xe2, 0xd8, 0x24, 0x88, 0xce, 0x07, 0x04, 0x54, 0xe7, 0xc8, 0x45, 0xeb, - 0xf7, 0x3f, 0xd1, 0xe6, 0x1f, 0x14, 0x40, 0xb3, 0xc3, 0xd8, 0x4c, 0x82, 0xca, 0x0f, 0x9a, 0xe0, - 0xef, 0x15, 0xa8, 0x4e, 0x4f, 0x60, 0x33, 0xe9, 0xdd, 0xf8, 0x41, 0xd3, 0xfb, 0x9d, 0x02, 0xcb, - 0x53, 0x73, 0xd7, 0xff, 0x55, 0x76, 0xbf, 0xcd, 0xc2, 0xda, 0x02, 0x3f, 0x5c, 0x8f, 0x06, 0x54, - 0x39, 0x33, 0xff, 0xf8, 0x22, 0xbf, 0xb5, 0xcd, 0xfb, 0x5f, 0x97, 0xf8, 0x61, 0x34, 0xcf, 0xde, - 0x01, 0x64, 0x5b, 0x94, 0x85, 0xf6, 0xc0, 0xa6, 0x7e, 0xf4, 0x6e, 0x2c, 0xa7, 0xd6, 0x95, 0x09, - 0x2e, 0x5f, 0x8f, 0x7f, 0x04, 0xd8, 0x73, 0x03, 0x3b, 0xb4, 0x5f, 0x52, 0xc3, 0x66, 0xf1, 0x8b, - 0x34, 0x9f, 0x62, 0x73, 0x3a, 0x8a, 0x2d, 0x2d, 0x16, 0x26, 0x6c, 0x46, 0x87, 0x64, 0x86, 0xcd, - 0xaf, 0xa1, 0xac, 0x8e, 0x62, 0x4b, 0xc2, 0xbe, 0x01, 0x15, 0xcb, 0x1d, 0xf3, 0x81, 0x40, 0xf2, - 0xf8, 0xad, 0xa7, 0xe8, 0x65, 0x89, 0x25, 0x94, 0x68, 0x62, 0x9b, 0xbc, 0xc1, 0x57, 0xf4, 0xb2, - 0xc4, 0x24, 0xe5, 0x36, 0xac, 0x90, 0xe1, 0xd0, 0xe7, 0xc1, 0xe3, 0x40, 0x72, 0x0c, 0xad, 0x26, - 0xb0, 0x20, 0x6e, 0x3c, 0x85, 0x62, 0x5c, 0x07, 0xde, 0x58, 0x78, 0x25, 0x0c, 0x4f, 0x7e, 0x47, - 0xc9, 0xf0, 0x97, 0x7a, 0x16, 0x1b, 0x6f, 0x40, 0xc5, 0x0e, 0x8c, 0xc9, 0x07, 0xbd, 0xcc, 0x66, - 0x66, 0xab, 0xa8, 0x97, 0xed, 0x20, 0xf9, 0x82, 0x53, 0xfb, 0x26, 0x03, 0xd5, 0xe9, 0x0f, 0x92, - 0xb8, 0x09, 0x45, 0xc7, 0x35, 0x89, 0x10, 0x82, 0xfc, 0x1a, 0xbe, 0xf5, 0x96, 0x6f, 0x98, 0xdb, - 0xed, 0x88, 0xaf, 0x27, 0x9e, 0x1b, 0x7f, 0x55, 0xa0, 0x18, 0xc3, 0xf8, 0x12, 0xe4, 0x3c, 0x12, - 0x9e, 0x8a, 0x70, 0xf9, 0xfd, 0x0c, 0x52, 0x74, 0xb1, 0xe6, 0x78, 0xe0, 0x11, 0x26, 0x24, 0x10, - 0xe1, 0x7c, 0xcd, 0xcf, 0xd5, 0xa1, 0xc4, 0x12, 0x03, 0xae, 0x3b, 0x1a, 0x51, 0x16, 0x06, 0xf1, - 0xb9, 0x46, 0x78, 0x23, 0x82, 0xf1, 0x3d, 0x58, 0x0d, 0x7d, 0x62, 0x3b, 0x53, 0xdc, 0x9c, 0xe0, - 0xa2, 0xd8, 0x90, 0x90, 0xf7, 0xe0, 0x72, 0x1c, 0xd7, 0xa2, 0x21, 0x31, 0x4f, 0xa9, 0x35, 0x71, - 0x2a, 0x88, 0xaf, 0x5d, 0xef, 0x45, 0x84, 0x66, 0x64, 0x8f, 0x7d, 0xf7, 0x9f, 0xc1, 0x9a, 0xe9, - 0x8e, 0x66, 0x2b, 0xb1, 0x8f, 0x66, 0xde, 0xbb, 0x82, 0xcf, 0x95, 0x2f, 0x61, 0x32, 0x54, 0x7c, - 0x9d, 0xc9, 0x1e, 0x74, 0xf7, 0xff, 0x98, 0xd9, 0x38, 0x90, 0x7e, 0xdd, 0xb8, 0x82, 0x3a, 0x1d, - 0x38, 0xd4, 0xe4, 0xd5, 0xf9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x78, 0x42, 0x69, 0x71, 0xb3, - 0x18, 0x00, 0x00, + // 2379 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x73, 0xdb, 0xc6, + 0x15, 0x37, 0xc1, 0x0f, 0x91, 0x8f, 0x14, 0xb5, 0x5a, 0x29, 0x36, 0x2c, 0xc7, 0xb1, 0xcc, 0xd8, + 0xb5, 0x6c, 0xb7, 0x74, 0x46, 0xfe, 0x88, 0xa3, 0x74, 0xd2, 0xa1, 0x48, 0x58, 0xa1, 0x4b, 0x91, + 0x2c, 0x48, 0x35, 0x76, 0x2e, 0x98, 0x15, 0xb0, 0xa4, 0x60, 0x83, 0x00, 0x02, 0x80, 0xb6, 0x95, + 0x93, 0x67, 0x7a, 0xea, 0x7f, 0xd0, 0xe9, 0x74, 0x7a, 0xc8, 0x25, 0x33, 0xed, 0xbd, 0x87, 0xde, + 0x7b, 0xed, 0x4c, 0xef, 0x3d, 0xf4, 0xd0, 0x99, 0xf6, 0x3f, 0xe8, 0xb5, 0xb3, 0xbb, 0x00, 0x08, + 0x7e, 0xd9, 0x4a, 0x66, 0x9c, 0x9c, 0xa4, 0xfd, 0xbd, 0xdf, 0x7b, 0x78, 0xfb, 0xf6, 0x87, 0xdd, + 0x87, 0x25, 0x20, 0x83, 0xfa, 0xba, 0x67, 0xba, 0x81, 0xe3, 0x55, 0x5d, 0xcf, 0x09, 0x1c, 0xbc, + 0x36, 0x74, 0x9c, 0xa1, 0x45, 0xc5, 0xe8, 0x78, 0x3c, 0xa8, 0x1c, 0xc2, 0xfa, 0x23, 0xd3, 0xa2, + 0x8d, 0x98, 0xd8, 0xa3, 0x01, 0x7e, 0x08, 0x99, 0x81, 0x69, 0x51, 0x39, 0xb5, 0x9d, 0xde, 0x29, + 0xee, 0x5e, 0xab, 0xce, 
0x38, 0x55, 0xa7, 0x3d, 0xba, 0x0c, 0x56, 0xb9, 0x47, 0xe5, 0xdf, 0x19, + 0xd8, 0x58, 0x60, 0xc5, 0x18, 0x32, 0x36, 0x19, 0xb1, 0x88, 0xa9, 0x9d, 0x82, 0xca, 0xff, 0xc7, + 0x32, 0xac, 0xb8, 0x44, 0x7f, 0x4e, 0x86, 0x54, 0x96, 0x38, 0x1c, 0x0d, 0xf1, 0x07, 0x00, 0x06, + 0x75, 0xa9, 0x6d, 0x50, 0x5b, 0x3f, 0x95, 0xd3, 0xdb, 0xe9, 0x9d, 0x82, 0x9a, 0x40, 0xf0, 0x6d, + 0x58, 0x77, 0xc7, 0xc7, 0x96, 0xa9, 0x6b, 0x09, 0x1a, 0x6c, 0xa7, 0x77, 0xb2, 0x2a, 0x12, 0x86, + 0xc6, 0x84, 0x7c, 0x03, 0xd6, 0x5e, 0x52, 0xf2, 0x3c, 0x49, 0x2d, 0x72, 0x6a, 0x99, 0xc1, 0x09, + 0x62, 0x1d, 0x4a, 0x23, 0xea, 0xfb, 0x64, 0x48, 0xb5, 0xe0, 0xd4, 0xa5, 0x72, 0x86, 0xcf, 0x7e, + 0x7b, 0x6e, 0xf6, 0xb3, 0x33, 0x2f, 0x86, 0x5e, 0xfd, 0x53, 0x97, 0xe2, 0x1a, 0x14, 0xa8, 0x3d, + 0x1e, 0x89, 0x08, 0xd9, 0x25, 0xf5, 0x53, 0xec, 0xf1, 0x68, 0x36, 0x4a, 0x9e, 0xb9, 0x85, 0x21, + 0x56, 0x7c, 0xea, 0xbd, 0x30, 0x75, 0x2a, 0xe7, 0x78, 0x80, 0x1b, 0x73, 0x01, 0x7a, 0xc2, 0x3e, + 0x1b, 0x23, 0xf2, 0xc3, 0x75, 0x28, 0xd0, 0x57, 0x01, 0xb5, 0x7d, 0xd3, 0xb1, 0xe5, 0x15, 0x1e, + 0xe4, 0xfa, 0x82, 0x55, 0xa4, 0x96, 0x31, 0x1b, 0x62, 0xe2, 0x87, 0x1f, 0xc0, 0x8a, 0xe3, 0x06, + 0xa6, 0x63, 0xfb, 0x72, 0x7e, 0x3b, 0xb5, 0x53, 0xdc, 0x7d, 0x7f, 0xa1, 0x10, 0x3a, 0x82, 0xa3, + 0x46, 0x64, 0xdc, 0x04, 0xe4, 0x3b, 0x63, 0x4f, 0xa7, 0x9a, 0xee, 0x18, 0x54, 0x33, 0xed, 0x81, + 0x23, 0x17, 0x78, 0x80, 0x2b, 0xf3, 0x13, 0xe1, 0xc4, 0xba, 0x63, 0xd0, 0xa6, 0x3d, 0x70, 0xd4, + 0xb2, 0x3f, 0x35, 0xc6, 0xe7, 0x21, 0xe7, 0x9f, 0xda, 0x01, 0x79, 0x25, 0x97, 0xb8, 0x42, 0xc2, + 0x51, 0xe5, 0x7f, 0x59, 0x58, 0x3b, 0x8b, 0xc4, 0x3e, 0x85, 0xec, 0x80, 0xcd, 0x52, 0x96, 0xbe, + 0x4b, 0x0d, 0x84, 0xcf, 0x74, 0x11, 0x73, 0xdf, 0xb3, 0x88, 0x35, 0x28, 0xda, 0xd4, 0x0f, 0xa8, + 0x21, 0x14, 0x91, 0x3e, 0xa3, 0xa6, 0x40, 0x38, 0xcd, 0x4b, 0x2a, 0xf3, 0xbd, 0x24, 0xf5, 0x04, + 0xd6, 0xe2, 0x94, 0x34, 0x8f, 0xd8, 0xc3, 0x48, 0x9b, 0x77, 0xde, 0x96, 0x49, 0x55, 0x89, 0xfc, + 0x54, 0xe6, 0xa6, 0x96, 0xe9, 0xd4, 0x18, 0x37, 0x00, 0x1c, 0x9b, 0x3a, 0x03, 0xcd, 0xa0, 0xba, + 0x25, 0xe7, 0x97, 0x54, 0xa9, 0xc3, 0x28, 0x73, 0x55, 0x72, 0x04, 0xaa, 0x5b, 0xf8, 0x93, 0x89, + 0xd4, 0x56, 0x96, 0x28, 0xe5, 0x50, 0xbc, 0x64, 0x73, 0x6a, 0x3b, 0x82, 0xb2, 0x47, 0x99, 0xee, + 0xa9, 0x11, 0xce, 0xac, 0xc0, 0x93, 0xa8, 0xbe, 0x75, 0x66, 0x6a, 0xe8, 0x26, 0x26, 0xb6, 0xea, + 0x25, 0x87, 0xf8, 0x43, 0x88, 0x01, 0x8d, 0xcb, 0x0a, 0xf8, 0x2e, 0x54, 0x8a, 0xc0, 0x36, 0x19, + 0xd1, 0xad, 0x87, 0x50, 0x9e, 0x2e, 0x0f, 0xde, 0x84, 0xac, 0x1f, 0x10, 0x2f, 0xe0, 0x2a, 0xcc, + 0xaa, 0x62, 0x80, 0x11, 0xa4, 0xa9, 0x6d, 0xf0, 0x5d, 0x2e, 0xab, 0xb2, 0x7f, 0xb7, 0x3e, 0x86, + 0xd5, 0xa9, 0xc7, 0x9f, 0xd5, 0xb1, 0xf2, 0xbb, 0x1c, 0x6c, 0x2e, 0xd2, 0xdc, 0x42, 0xf9, 0x9f, + 0x87, 0x9c, 0x3d, 0x1e, 0x1d, 0x53, 0x4f, 0x4e, 0xf3, 0x08, 0xe1, 0x08, 0xd7, 0x20, 0x6b, 0x91, + 0x63, 0x6a, 0xc9, 0x99, 0xed, 0xd4, 0x4e, 0x79, 0xf7, 0xf6, 0x99, 0x54, 0x5d, 0x6d, 0x31, 0x17, + 0x55, 0x78, 0xe2, 0xcf, 0x20, 0x13, 0x6e, 0x71, 0x2c, 0xc2, 0xad, 0xb3, 0x45, 0x60, 0x5a, 0x54, + 0xb9, 0x1f, 0xbe, 0x04, 0x05, 0xf6, 0x57, 0xd4, 0x36, 0xc7, 0x73, 0xce, 0x33, 0x80, 0xd5, 0x15, + 0x6f, 0x41, 0x9e, 0xcb, 0xcc, 0xa0, 0xd1, 0xd1, 0x10, 0x8f, 0xd9, 0xc2, 0x18, 0x74, 0x40, 0xc6, + 0x56, 0xa0, 0xbd, 0x20, 0xd6, 0x98, 0x72, 0xc1, 0x14, 0xd4, 0x52, 0x08, 0xfe, 0x9a, 0x61, 0xf8, + 0x0a, 0x14, 0x85, 0x2a, 0x4d, 0xdb, 0xa0, 0xaf, 0xf8, 0xee, 0x93, 0x55, 0x85, 0x50, 0x9b, 0x0c, + 0x61, 0x8f, 0x7f, 0xe6, 0x3b, 0x76, 0xb4, 0xb4, 0xfc, 0x11, 0x0c, 0xe0, 0x8f, 0xff, 0x78, 0x76, + 0xe3, 0xbb, 0xbc, 0x78, 0x7a, 0xb3, 0x5a, 0xac, 
0xfc, 0x45, 0x82, 0x0c, 0x7f, 0xdf, 0xd6, 0xa0, + 0xd8, 0x7f, 0xda, 0x55, 0xb4, 0x46, 0xe7, 0x68, 0xbf, 0xa5, 0xa0, 0x14, 0x2e, 0x03, 0x70, 0xe0, + 0x51, 0xab, 0x53, 0xeb, 0x23, 0x29, 0x1e, 0x37, 0xdb, 0xfd, 0x07, 0xf7, 0x50, 0x3a, 0x76, 0x38, + 0x12, 0x40, 0x26, 0x49, 0xb8, 0xbb, 0x8b, 0xb2, 0x18, 0x41, 0x49, 0x04, 0x68, 0x3e, 0x51, 0x1a, + 0x0f, 0xee, 0xa1, 0xdc, 0x34, 0x72, 0x77, 0x17, 0xad, 0xe0, 0x55, 0x28, 0x70, 0x64, 0xbf, 0xd3, + 0x69, 0xa1, 0x7c, 0x1c, 0xb3, 0xd7, 0x57, 0x9b, 0xed, 0x03, 0x54, 0x88, 0x63, 0x1e, 0xa8, 0x9d, + 0xa3, 0x2e, 0x82, 0x38, 0xc2, 0xa1, 0xd2, 0xeb, 0xd5, 0x0e, 0x14, 0x54, 0x8c, 0x19, 0xfb, 0x4f, + 0xfb, 0x4a, 0x0f, 0x95, 0xa6, 0xd2, 0xba, 0xbb, 0x8b, 0x56, 0xe3, 0x47, 0x28, 0xed, 0xa3, 0x43, + 0x54, 0xc6, 0xeb, 0xb0, 0x2a, 0x1e, 0x11, 0x25, 0xb1, 0x36, 0x03, 0x3d, 0xb8, 0x87, 0xd0, 0x24, + 0x11, 0x11, 0x65, 0x7d, 0x0a, 0x78, 0x70, 0x0f, 0xe1, 0x4a, 0x1d, 0xb2, 0x5c, 0x5d, 0x18, 0x43, + 0xb9, 0x55, 0xdb, 0x57, 0x5a, 0x5a, 0xa7, 0xdb, 0x6f, 0x76, 0xda, 0xb5, 0x16, 0x4a, 0x4d, 0x30, + 0x55, 0xf9, 0xd5, 0x51, 0x53, 0x55, 0x1a, 0x48, 0x4a, 0x62, 0x5d, 0xa5, 0xd6, 0x57, 0x1a, 0x28, + 0x5d, 0xd1, 0x61, 0x73, 0xd1, 0x3e, 0xb3, 0xf0, 0xcd, 0x48, 0x2c, 0xb1, 0xb4, 0x64, 0x89, 0x79, + 0xac, 0xb9, 0x25, 0xfe, 0x26, 0x05, 0x1b, 0x0b, 0xf6, 0xda, 0x85, 0x0f, 0xf9, 0x05, 0x64, 0x85, + 0x44, 0xc5, 0xe9, 0x73, 0x73, 0xe1, 0xa6, 0xcd, 0x05, 0x3b, 0x77, 0x02, 0x71, 0xbf, 0xe4, 0x09, + 0x9c, 0x5e, 0x72, 0x02, 0xb3, 0x10, 0x73, 0x49, 0xfe, 0x26, 0x05, 0xf2, 0xb2, 0xd8, 0x6f, 0xd9, + 0x28, 0xa4, 0xa9, 0x8d, 0xe2, 0xd3, 0xd9, 0x04, 0xae, 0x2e, 0x9f, 0xc3, 0x5c, 0x16, 0xdf, 0xa6, + 0xe0, 0xfc, 0xe2, 0x46, 0x65, 0x61, 0x0e, 0x9f, 0x41, 0x6e, 0x44, 0x83, 0x13, 0x27, 0x3a, 0xac, + 0x7f, 0xb2, 0xe0, 0x08, 0x60, 0xe6, 0xd9, 0x5a, 0x85, 0x5e, 0xc9, 0x33, 0x24, 0xbd, 0xac, 0xdb, + 0x10, 0xd9, 0xcc, 0x65, 0xfa, 0x5b, 0x09, 0xde, 0x5b, 0x18, 0x7c, 0x61, 0xa2, 0x97, 0x01, 0x4c, + 0xdb, 0x1d, 0x07, 0xe2, 0x40, 0x16, 0xfb, 0x53, 0x81, 0x23, 0xfc, 0xdd, 0x67, 0x7b, 0xcf, 0x38, + 0x88, 0xed, 0x69, 0x6e, 0x07, 0x01, 0x71, 0xc2, 0xc3, 0x49, 0xa2, 0x19, 0x9e, 0xe8, 0x07, 0x4b, + 0x66, 0x3a, 0x77, 0xd6, 0x7d, 0x04, 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x8c, + 0x4c, 0x7b, 0xc8, 0x37, 0xe0, 0xfc, 0x5e, 0x76, 0x40, 0x2c, 0x9f, 0xaa, 0x6b, 0xc2, 0xdc, 0x8b, + 0xac, 0xcc, 0x83, 0x9f, 0x32, 0x5e, 0xc2, 0x23, 0x37, 0xe5, 0x21, 0xcc, 0xb1, 0x47, 0xe5, 0xcf, + 0x2b, 0x50, 0x4c, 0xb4, 0x75, 0xf8, 0x2a, 0x94, 0x9e, 0x91, 0x17, 0x44, 0x8b, 0x5a, 0x75, 0x51, + 0x89, 0x22, 0xc3, 0xba, 0x61, 0xbb, 0xfe, 0x11, 0x6c, 0x72, 0x8a, 0x33, 0x0e, 0xa8, 0xa7, 0xe9, + 0x16, 0xf1, 0x7d, 0x5e, 0xb4, 0x3c, 0xa7, 0x62, 0x66, 0xeb, 0x30, 0x53, 0x3d, 0xb2, 0xe0, 0xfb, + 0xb0, 0xc1, 0x3d, 0x46, 0x63, 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xe3, 0xc1, 0xe7, 0x1b, 0x71, + 0x9c, 0xd9, 0x3a, 0x63, 0x1c, 0x86, 0x04, 0x96, 0x91, 0x8f, 0x1b, 0x70, 0x99, 0xbb, 0x0d, 0xa9, + 0x4d, 0x3d, 0x12, 0x50, 0x8d, 0x7e, 0x35, 0x26, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0x4e, 0x88, 0x7f, + 0x22, 0x6f, 0xb2, 0x00, 0xfb, 0x92, 0x9c, 0x52, 0x2f, 0x32, 0xe2, 0x41, 0xc8, 0x53, 0x38, 0xad, + 0x66, 0x1b, 0x9f, 0x13, 0xff, 0x04, 0xef, 0xc1, 0x79, 0x1e, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x6a, + 0xfa, 0x09, 0xd5, 0x9f, 0x6b, 0xe3, 0x60, 0xf0, 0x50, 0xbe, 0x94, 0x7c, 0x3e, 0xcf, 0xb0, 0xc7, + 0x39, 0x75, 0x46, 0x39, 0x0a, 0x06, 0x0f, 0x71, 0x0f, 0x4a, 0x6c, 0x31, 0x46, 0xe6, 0xd7, 0x54, + 0x1b, 0x38, 0x1e, 0x3f, 0x59, 0xca, 0x0b, 0xde, 0xec, 0x44, 0x05, 0xab, 0x9d, 0xd0, 0xe1, 0xd0, + 0x31, 0xe8, 0x5e, 0xb6, 0xd7, 0x55, 0x94, 0x86, 0x5a, 0x8c, 0xa2, 0x3c, 
0x72, 0x3c, 0x26, 0xa8, + 0xa1, 0x13, 0x17, 0xb8, 0x28, 0x04, 0x35, 0x74, 0xa2, 0xf2, 0xde, 0x87, 0x0d, 0x5d, 0x17, 0x73, + 0x36, 0x75, 0x2d, 0x6c, 0xf1, 0x7d, 0x19, 0x4d, 0x15, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0xd4, 0xb8, + 0x8f, 0x3f, 0x81, 0xf7, 0x26, 0xc5, 0x4a, 0x3a, 0xae, 0xcf, 0xcd, 0x72, 0xd6, 0xf5, 0x3e, 0x6c, + 0xb8, 0xa7, 0xf3, 0x8e, 0x78, 0xea, 0x89, 0xee, 0xe9, 0xac, 0xdb, 0x75, 0xfe, 0xd9, 0xe6, 0x51, + 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x24, 0xd9, 0x09, 0x03, 0xbe, 0x03, 0x48, 0xd7, 0x35, 0x6a, 0x93, + 0x63, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1, 0xe5, 0x2b, 0x49, 0x72, 0x59, 0xd7, 0x15, 0x6e, 0xad, + 0x71, 0x23, 0xbe, 0x05, 0xeb, 0xce, 0xf1, 0x33, 0x5d, 0x28, 0x4b, 0x73, 0x3d, 0x3a, 0x30, 0x5f, + 0xc9, 0xd7, 0x78, 0x99, 0xd6, 0x98, 0x81, 0xeb, 0xaa, 0xcb, 0x61, 0x7c, 0x13, 0x90, 0xee, 0x9f, + 0x10, 0xcf, 0xe5, 0x47, 0xbb, 0xef, 0x12, 0x9d, 0xca, 0xd7, 0x05, 0x55, 0xe0, 0xed, 0x08, 0x66, + 0xca, 0xf6, 0x5f, 0x9a, 0x83, 0x20, 0x8a, 0x78, 0x43, 0x28, 0x9b, 0x63, 0x61, 0xb4, 0x27, 0xb0, + 0x39, 0xb6, 0x4d, 0x3b, 0xa0, 0x9e, 0xeb, 0x51, 0xd6, 0xc4, 0x8b, 0x37, 0x51, 0xfe, 0xcf, 0xca, + 0x92, 0x36, 0xfc, 0x28, 0xc9, 0x16, 0x02, 0x50, 0x37, 0xc6, 0xf3, 0x60, 0x65, 0x0f, 0x4a, 0x49, + 0x5d, 0xe0, 0x02, 0x08, 0x65, 0xa0, 0x14, 0x3b, 0x63, 0xeb, 0x9d, 0x06, 0x3b, 0x1d, 0xbf, 0x54, + 0x90, 0xc4, 0x4e, 0xe9, 0x56, 0xb3, 0xaf, 0x68, 0xea, 0x51, 0xbb, 0xdf, 0x3c, 0x54, 0x50, 0xfa, + 0x56, 0x21, 0xff, 0xdf, 0x15, 0xf4, 0xfa, 0xf5, 0xeb, 0xd7, 0x52, 0xe5, 0x6f, 0x12, 0x94, 0xa7, + 0x3b, 0x63, 0xfc, 0x73, 0xb8, 0x10, 0x7d, 0xc6, 0xfa, 0x34, 0xd0, 0x5e, 0x9a, 0x1e, 0x97, 0xea, + 0x88, 0x88, 0xde, 0x32, 0xae, 0xf2, 0x66, 0xc8, 0xea, 0xd1, 0xe0, 0x0b, 0xd3, 0x63, 0x42, 0x1c, + 0x91, 0x00, 0xb7, 0xe0, 0x8a, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0xb9, 0x40, + 0xd0, 0x88, 0xae, 0x53, 0xdf, 0x77, 0xc4, 0x11, 0x11, 0x47, 0x79, 0xdf, 0x76, 0x7a, 0x21, 0x79, + 0xb2, 0x77, 0xd6, 0x42, 0xea, 0x8c, 0x22, 0xd2, 0xcb, 0x14, 0x71, 0x09, 0x0a, 0x23, 0xe2, 0x6a, + 0xd4, 0x0e, 0xbc, 0x53, 0xde, 0xcf, 0xe5, 0xd5, 0xfc, 0x88, 0xb8, 0x0a, 0x1b, 0xbf, 0xbb, 0x35, + 0x48, 0xd6, 0xf1, 0x9f, 0x69, 0x28, 0x25, 0x7b, 0x3a, 0xd6, 0x22, 0xeb, 0x7c, 0xff, 0x4e, 0xf1, + 0x37, 0xfc, 0xc3, 0x37, 0x76, 0x80, 0xd5, 0x3a, 0xdb, 0xd8, 0xf7, 0x72, 0xa2, 0xd3, 0x52, 0x85, + 0x27, 0x3b, 0x54, 0xd9, 0x3b, 0x4d, 0x45, 0xff, 0x9e, 0x57, 0xc3, 0x11, 0x3e, 0x80, 0xdc, 0x33, + 0x9f, 0xc7, 0xce, 0xf1, 0xd8, 0xd7, 0xde, 0x1c, 0xfb, 0x71, 0x8f, 0x07, 0x2f, 0x3c, 0xee, 0x69, + 0xed, 0x8e, 0x7a, 0x58, 0x6b, 0xa9, 0xa1, 0x3b, 0xbe, 0x08, 0x19, 0x8b, 0x7c, 0x7d, 0x3a, 0x7d, + 0x04, 0x70, 0xe8, 0xac, 0x85, 0xbf, 0x08, 0x99, 0x97, 0x94, 0x3c, 0x9f, 0xde, 0x78, 0x39, 0xf4, + 0x0e, 0xa5, 0x7f, 0x07, 0xb2, 0xbc, 0x5e, 0x18, 0x20, 0xac, 0x18, 0x3a, 0x87, 0xf3, 0x90, 0xa9, + 0x77, 0x54, 0x26, 0x7f, 0x04, 0x25, 0x81, 0x6a, 0xdd, 0xa6, 0x52, 0x57, 0x90, 0x54, 0xb9, 0x0f, + 0x39, 0x51, 0x04, 0xf6, 0x6a, 0xc4, 0x65, 0x40, 0xe7, 0xc2, 0x61, 0x18, 0x23, 0x15, 0x59, 0x8f, + 0x0e, 0xf7, 0x15, 0x15, 0x49, 0xc9, 0xe5, 0xf5, 0xa1, 0x94, 0x6c, 0xe7, 0x7e, 0x18, 0x4d, 0xfd, + 0x35, 0x05, 0xc5, 0x44, 0x7b, 0xc6, 0x1a, 0x03, 0x62, 0x59, 0xce, 0x4b, 0x8d, 0x58, 0x26, 0xf1, + 0x43, 0x51, 0x00, 0x87, 0x6a, 0x0c, 0x39, 0xeb, 0xa2, 0xfd, 0x20, 0xc9, 0xff, 0x31, 0x05, 0x68, + 0xb6, 0xb5, 0x9b, 0x49, 0x30, 0xf5, 0xa3, 0x26, 0xf8, 0x87, 0x14, 0x94, 0xa7, 0xfb, 0xb9, 0x99, + 0xf4, 0xae, 0xfe, 0xa8, 0xe9, 0xfd, 0x4b, 0x82, 0xd5, 0xa9, 0x2e, 0xee, 0xac, 0xd9, 0x7d, 0x05, + 0xeb, 0xa6, 0x41, 0x47, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x82, 0x5a, 0x72, 0x85, + 
0x6f, 0x14, 0x77, 0xde, 0xdc, 0x27, 0x56, 0x9b, 0x13, 0xbf, 0x16, 0x73, 0xdb, 0xdb, 0x68, 0x36, + 0x94, 0xc3, 0x6e, 0xa7, 0xaf, 0xb4, 0xeb, 0x4f, 0xb5, 0xa3, 0xf6, 0x2f, 0xdb, 0x9d, 0x2f, 0xda, + 0x2a, 0x32, 0x67, 0x68, 0xef, 0xf0, 0x55, 0xef, 0x02, 0x9a, 0x4d, 0x0a, 0x5f, 0x80, 0x45, 0x69, + 0xa1, 0x73, 0x78, 0x03, 0xd6, 0xda, 0x1d, 0xad, 0xd7, 0x6c, 0x28, 0x9a, 0xf2, 0xe8, 0x91, 0x52, + 0xef, 0xf7, 0xc4, 0x87, 0x73, 0xcc, 0xee, 0x4f, 0xbf, 0xd4, 0xbf, 0x4f, 0xc3, 0xc6, 0x82, 0x4c, + 0x70, 0x2d, 0xec, 0xd9, 0xc5, 0x67, 0xc4, 0xcf, 0xce, 0x92, 0x7d, 0x95, 0x75, 0x05, 0x5d, 0xe2, + 0x05, 0x61, 0x8b, 0x7f, 0x13, 0x58, 0x95, 0xec, 0xc0, 0x1c, 0x98, 0xd4, 0x0b, 0xef, 0x19, 0x44, + 0x23, 0xbf, 0x36, 0xc1, 0xc5, 0x55, 0xc3, 0x4f, 0x01, 0xbb, 0x8e, 0x6f, 0x06, 0xe6, 0x0b, 0xaa, + 0x99, 0x76, 0x74, 0x29, 0xc1, 0x1a, 0xfb, 0x8c, 0x8a, 0x22, 0x4b, 0xd3, 0x0e, 0x62, 0xb6, 0x4d, + 0x87, 0x64, 0x86, 0xcd, 0x36, 0xf0, 0xb4, 0x8a, 0x22, 0x4b, 0xcc, 0xbe, 0x0a, 0x25, 0xc3, 0x19, + 0xb3, 0x36, 0x49, 0xf0, 0xd8, 0x79, 0x91, 0x52, 0x8b, 0x02, 0x8b, 0x29, 0x61, 0x1f, 0x3b, 0xb9, + 0x0d, 0x29, 0xa9, 0x45, 0x81, 0x09, 0xca, 0x0d, 0x58, 0x23, 0xc3, 0xa1, 0xc7, 0x82, 0x47, 0x81, + 0x44, 0x67, 0x5e, 0x8e, 0x61, 0x4e, 0xdc, 0x7a, 0x0c, 0xf9, 0xa8, 0x0e, 0xec, 0x48, 0x66, 0x95, + 0xd0, 0x5c, 0x71, 0x27, 0x25, 0xed, 0x14, 0xd4, 0xbc, 0x1d, 0x19, 0xaf, 0x42, 0xc9, 0xf4, 0xb5, + 0xc9, 0xe5, 0xa8, 0xb4, 0x2d, 0xed, 0xe4, 0xd5, 0xa2, 0xe9, 0xc7, 0xb7, 0x61, 0x95, 0x6f, 0x25, + 0x28, 0x4f, 0x5f, 0xee, 0xe2, 0x06, 0xe4, 0x2d, 0x47, 0x27, 0x5c, 0x5a, 0xe2, 0x97, 0x85, 0x9d, + 0xb7, 0xdc, 0x07, 0x57, 0x5b, 0x21, 0x5f, 0x8d, 0x3d, 0xb7, 0xfe, 0x9e, 0x82, 0x7c, 0x04, 0xe3, + 0xf3, 0x90, 0x71, 0x49, 0x70, 0xc2, 0xc3, 0x65, 0xf7, 0x25, 0x94, 0x52, 0xf9, 0x98, 0xe1, 0xbe, + 0x4b, 0x6c, 0x2e, 0x81, 0x10, 0x67, 0x63, 0xb6, 0xae, 0x16, 0x25, 0x06, 0x6f, 0xfb, 0x9d, 0xd1, + 0x88, 0xda, 0x81, 0x1f, 0xad, 0x6b, 0x88, 0xd7, 0x43, 0x18, 0xdf, 0x86, 0xf5, 0xc0, 0x23, 0xa6, + 0x35, 0xc5, 0xcd, 0x70, 0x2e, 0x8a, 0x0c, 0x31, 0x79, 0x0f, 0x2e, 0x46, 0x71, 0x0d, 0x1a, 0x10, + 0xfd, 0x84, 0x1a, 0x13, 0xa7, 0x1c, 0xbf, 0x39, 0xbc, 0x10, 0x12, 0x1a, 0xa1, 0x3d, 0xf2, 0xad, + 0xfc, 0x23, 0x05, 0xeb, 0xd1, 0x87, 0x8a, 0x11, 0x17, 0xeb, 0x10, 0x80, 0xd8, 0xb6, 0x13, 0x24, + 0xcb, 0x35, 0x2f, 0xe5, 0x39, 0xbf, 0x6a, 0x2d, 0x76, 0x52, 0x13, 0x01, 0xb6, 0x46, 0x00, 0x13, + 0xcb, 0xd2, 0xb2, 0x5d, 0x81, 0x62, 0x78, 0x73, 0xcf, 0x7f, 0xfe, 0x11, 0x9f, 0xb6, 0x20, 0x20, + 0xf6, 0x45, 0x83, 0x37, 0x21, 0x7b, 0x4c, 0x87, 0xa6, 0x1d, 0xde, 0x27, 0x8a, 0x41, 0x74, 0x4b, + 0x99, 0x89, 0x6f, 0x29, 0xf7, 0x9f, 0xc0, 0x86, 0xee, 0x8c, 0x66, 0xd3, 0xdd, 0x47, 0x33, 0x9f, + 0xd7, 0xfe, 0xe7, 0xa9, 0x2f, 0x61, 0xd2, 0x62, 0x7e, 0x23, 0xa5, 0x0f, 0xba, 0xfb, 0x7f, 0x92, + 0xb6, 0x0e, 0x84, 0x5f, 0x37, 0x9a, 0xa6, 0x4a, 0x07, 0x16, 0xd5, 0x59, 0xea, 0xff, 0x0f, 0x00, + 0x00, 0xff, 0xff, 0xa0, 0xbf, 0x63, 0x15, 0xd3, 0x1a, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go index b175f555..bac9913e 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -21,12 +21,14 @@ It has these top-level messages: FileOptions MessageOptions FieldOptions + OneofOptions EnumOptions EnumValueOptions ServiceOptions MethodOptions UninterpretedOption SourceCodeInfo + GeneratedCodeInfo */ package 
descriptor @@ -231,11 +233,14 @@ func (this *OneofDescriptorProto) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 6) s = append(s, "&descriptor.OneofDescriptorProto{") if this.Name != nil { s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -383,8 +388,8 @@ func (this *FileOptions) GoString() string { if this.CsharpNamespace != nil { s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") } - if this.JavananoUseDeprecatedPackage != nil { - s = append(s, "JavananoUseDeprecatedPackage: "+valueToGoStringDescriptor(this.JavananoUseDeprecatedPackage, "bool")+",\n") + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") } if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") @@ -458,6 +463,22 @@ func (this *FieldOptions) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *EnumOptions) GoString() string { if this == nil { return "nil" @@ -522,11 +543,14 @@ func (this *MethodOptions) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&descriptor.MethodOptions{") if this.Deprecated != nil { s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "descriptor.MethodOptions_IdempotencyLevel")+",\n") + } if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } @@ -630,6 +654,45 @@ func (this *SourceCodeInfo_Location) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + 
s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func valueToGoStringDescriptor(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go index 861f4d02..e0846a35 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -99,6 +99,17 @@ func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { return x } +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + func (field *FieldDescriptorProto) GetKey() []byte { x := field.GetKeyUint64() i := 0 @@ -111,6 +122,18 @@ func (field *FieldDescriptorProto) GetKey() []byte { return keybuf } +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { msg := desc.GetMessage(packageName, messageName) if msg == nil { @@ -352,6 +375,16 @@ func (f *FieldDescriptorProto) IsPacked() bool { return f.Options != nil && f.GetOptions().GetPacked() } +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + func (m *DescriptorProto) HasExtension() bool { return len(m.ExtensionRange) > 0 } diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md index 795f53f6..037fc7c8 100644 --- a/vendor/github.com/golang/protobuf/README.md +++ b/vendor/github.com/golang/protobuf/README.md @@ -1,7 +1,5 @@ # Go support for Protocol Buffers -[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf) - Google's data interchange format. Copyright 2010 The Go Authors. https://github.com/golang/protobuf @@ -24,7 +22,7 @@ To use this software, you must: for details or, if you are using gccgo, follow the instructions at https://golang.org/doc/install/gccgo - Grab the code from the repository and install the proto package. - The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`. + The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`. The compiler plugin, protoc-gen-go, will be installed in $GOBIN, defaulting to $GOPATH/bin. It must be in your $PATH for the protocol compiler, protoc, to find it. @@ -106,6 +104,7 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. 
+ - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. Consider file test.proto, containing diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 8b84d1b2..68b9b30c 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int { // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -1075,17 +1075,10 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error { func (o *Buffer) enc_exts(p *Properties, base structPointer) error { exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { + if err := encodeExtensions(exts); err != nil { return err } + v, _ := exts.extensionsRead() return o.enc_map_body(v) } diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index eaad2183..6b9b3637 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -154,7 +154,6 @@ type ExtensionDesc struct { Field int32 // field number Name string // fully-qualified name of extension, for text formatting Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined } func (ed *ExtensionDesc) repeated() bool { diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 1c225504..ac4ddbc0 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -73,6 +73,7 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. The simplest way to describe this is to see an example. diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 5e14513f..61f83c1e 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -865,7 +865,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 00000000..89e07ae1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,136 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. 
All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. 
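[Editorial aside, not part of the vendored file.] The Any helpers in this file (MarshalAny, UnmarshalAny, Is, DynamicAny) compose into a simple pack/unpack round trip. A minimal sketch under the assumption that the well-known Duration type is used as the payload, since its package registers it and it is therefore certainly "linked in":

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    	"github.com/golang/protobuf/ptypes"
    	durpb "github.com/golang/protobuf/ptypes/duration"
    )

    func main() {
    	// Pack a concrete message; the TypeUrl records its full name.
    	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 1, Nanos: 212000000})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

    	// Unpack when the concrete type is known at compile time.
    	var d durpb.Duration
    	if ptypes.Is(a, &d) { // cheap name check before unmarshaling
    		if err := ptypes.UnmarshalAny(a, &d); err != nil {
    			panic(err)
    		}
    	}

    	// Unpack when it is not: DynamicAny allocates the right type,
    	// provided that type is linked into the binary.
    	var dyn ptypes.DynamicAny
    	if err := ptypes.UnmarshalAny(a, &dyn); err == nil {
    		fmt.Println(proto.MessageName(dyn.Message)) // google.protobuf.Duration
    	}
    }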
+func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 00000000..f2c6906b --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,155 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/any/any.proto +// DO NOT EDIT! + +/* +Package any is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/any/any.proto + +It has these top-level messages: + Any +*/ +package any + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... 
+// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, + 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, + 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, + 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, + 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, + 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, + 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, + 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, + 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, + 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 00000000..81dcf46c --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,140 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 00000000..c0d595da --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 00000000..65cb0f8e --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290 years). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 00000000..56974834 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-go.
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto +// DO NOT EDIT! + +/* +Package duration is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/duration/duration.proto + +It has these top-level messages: + Duration +*/ +package duration + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 189 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, + 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, + 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, + 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, + 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, + 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, + 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13, + 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, + 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 00000000..96c1796d --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,98 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 00000000..1b365762 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,125 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. 
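[Editorial aside, not part of the vendored file.] A minimal round-trip sketch of the Timestamp, TimestampProto, and TimestampString helpers defined just below, assuming nothing beyond this package and the standard library:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/golang/protobuf/ptypes"
    )

    func main() {
    	// time.Time -> proto; errors only outside [0001-01-01, 10000-01-01).
    	ts, err := ptypes.TimestampProto(time.Now())
    	if err != nil {
    		panic(err)
    	}

    	// proto -> time.Time; the result is always in the UTC location.
    	t, err := ptypes.Timestamp(ts)
    	if err != nil {
    		panic(err)
    	}

    	fmt.Println(t.Format(time.RFC3339Nano))
    	fmt.Println(ptypes.TimestampString(ts)) // same instant, via the helper
    }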
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because it corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 00000000..ffcc5159 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,127 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +// DO NOT EDIT! + +/* +Package timestamp is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +It has these top-level messages: + Timestamp +*/ +package timestamp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 194 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, + 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, + 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, + 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, + 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, + 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, + 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, + 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, + 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, + 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 00000000..7992a858 --- /dev/null +++ 
b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,111 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. 
Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/google/btree/LICENSE similarity index 100% rename from vendor/github.com/go-openapi/analysis/LICENSE rename to vendor/github.com/google/btree/LICENSE diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md new file mode 100644 index 00000000..6062a4da --- /dev/null +++ b/vendor/github.com/google/btree/README.md @@ -0,0 +1,12 @@ +# BTree implementation for Go + +![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +The API is based on the wonderful +http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to +act as a drop-in replacement for gollrb trees. + +See http://godoc.org/github.com/google/btree for documentation. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go new file mode 100644 index 00000000..fc5aaaa1 --- /dev/null +++ b/vendor/github.com/google/btree/btree.go @@ -0,0 +1,649 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement for gollrb.LLRB +// trees (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values or backwards iteration. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + // DefaultFreeListSize is the default capacity of a FreeList. + DefaultFreeListSize = 32 +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two BTrees using the same freelist are not safe for concurrent write access. +type FreeList struct { + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + index := len(f.freelist) - 1 + if index < 0 { + return new(node) + } + f.freelist, n = f.freelist[:index], f.freelist[index] + return +} + +func (f *FreeList) freeNode(n *node) { + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + } +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list.
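+// Sharing one FreeList lets trees reuse freed nodes; as noted on FreeList +// above, two trees using the same freelist are not safe for concurrent write +// access. A minimal sketch of sharing (the degree chosen here is illustrative): +// +// fl := NewFreeList(DefaultFreeListSize) +// a := NewWithFreeList(4, fl) +// b := NewWithFreeList(4, fl) // a and b now share fl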
+func NewWithFreeList(degree int, f *FreeList) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + freelist: f, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + (*s)[index] = nil + copy((*s)[index:], (*s)[index+1:]) + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + (*s)[index] = nil + copy((*s)[index:], (*s)[index+1:]) + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + t *BTree +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.t.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items = n.items[:i] + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children = n.children[:i+1] + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.children[i] + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item +// be found/replaced by insert, it will be returned.
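+// +// A tree-level sketch of this splitting behavior (assuming the Int helper +// defined at the end of this file): with degree 2, maxItems is 3, so the +// fourth insert splits the full root before descending. +// +// tr := New(2) +// for i := 0; i < 4; i++ { +// tr.ReplaceOrInsert(Int(i)) +// } +// // tr.Len() == 4; the root now holds one item and two children.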
+func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.children[i].insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + child := n.children[i] + if len(child.items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. 
+// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + child := n.children[i] + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + stealFrom := n.children[i-1] + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + stealFrom := n.children[i+1] + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + child = n.children[i] + } + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.t.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +// iterate provides a simple method for iterating over elements in the tree. +// It could probably use some work to be extra-efficient (it calls from() a +// little more than it should), but it works pretty well for now. +// +// It requires that 'from' and 'to' both return true for values we should hit +// with the iterator. It should also be the case that 'from' returns true for +// values less than or equal to values 'to' returns true for, and 'to' +// returns true for values greater than or equal to those that 'from' +// does. +func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool { + for i, item := range n.items { + if !from(item) { + continue + } + if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) { + return false + } + if !to(item) { + return false + } + if !iter(item) { + return false + } + } + if len(n.children) > 0 { + return n.children[len(n.children)-1].iterate(from, to, iter) + } + return true +} + +// Used for testing/debugging purposes. +func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + freelist *FreeList +} + +// maxItems returns the max number of items to allow per node. 
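+// For a tree of degree d this is 2*d - 1; e.g. a degree-2 tree allows at +// most three items per node before a split.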
+func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (t *BTree) newNode() (n *node) { + n = t.freelist.newNode() + n.t = t + return +} + +func (t *BTree) freeNode(n *node) { + for i := range n.items { + n.items[i] = nil // clear to allow GC + } + n.items = n.items[:0] + for i := range n.children { + n.children[i] = nil // clear to allow GC + } + n.children = n.children[:0] + n.t = nil // clear to allow GC + t.freelist.freeNode(n) +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return !a.Less(greaterOrEqual) }, + func(a Item) bool { return a.Less(lessThan) }, + iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return a.Less(pivot) }, + iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. 
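+// +// A short usage sketch (assuming a tree tr holding Int items, the helper +// defined at the end of this file): +// +// tr.AscendGreaterOrEqual(Int(10), func(i Item) bool { +// fmt.Println(i) // visits items >= Int(10) in ascending order +// return true // return false to stop early +// })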
+func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return !a.Less(pivot) }, + func(a Item) bool { return true }, + iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return true }, + iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE similarity index 100% rename from vendor/github.com/go-openapi/loads/LICENSE rename to vendor/github.com/googleapis/gnostic/LICENSE diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go new file mode 100644 index 00000000..0e32451a --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -0,0 +1,8728 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +package openapi_v2 + +import ( + "fmt" + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v2" + "regexp" + "strings" +) + +// Version returns the package name (and OpenAPI version). +func Version() string { + return "openapi_v2" +} + +// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. 
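+// It tries the Schema form (a YAML map) and then the boolean form; errors +// recorded while probing for a subtype are discarded once one of the oneof +// possibilities matches.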
+func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := in.(bool) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. +func NewAny(in interface{}, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes, _ := yaml.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. +func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { + errors := make([]error, 0) + x := &ApiKeySecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [apiKey] + if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header query] + if ok && 
!compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. +func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { + errors := make([]error, 0) + x := &BasicAuthenticationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [basic] + if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok 
{ + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. +func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { + errors := make([]error, 0) + x := &BodyParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "schema"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "required", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [body] + if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema schema = 5; + v5 := compiler.MapValueForKey(m, "schema") + if v5 != nil { + var err error + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok 
:= compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if possible, returning an error if not. +func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefault creates an object of type Default if possible, returning an error if not. 
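+// Unlike the vendor-extension maps elsewhere in this file, every key in the +// input map becomes a NamedAny in AdditionalProperties; keys are not filtered +// by an "x-" prefix.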
+func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { + errors := make([]error, 0) + x := &Default{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefinitions creates an object of type Definitions if possible, returning an error if not. +func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { + errors := make([]error, 0) + x := &Definitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
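+// A Document is the root of an OpenAPI v2 (Swagger) description: "swagger", +// "info", and "paths" are required keys, and "swagger" must be "2.0".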
+func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "paths", "swagger"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string swagger = 1; + v1 := compiler.MapValueForKey(m, "swagger") + if v1 != nil { + x.Swagger, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [2.0] + if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) + if err != nil { + errors = append(errors, err) + } + } + // string host = 3; + v3 := compiler.MapValueForKey(m, "host") + if v3 != nil { + x.Host, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string base_path = 4; + v4 := compiler.MapValueForKey(m, "basePath") + if v4 != nil { + x.BasePath, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string schemes = 5; + v5 := compiler.MapValueForKey(m, "schemes") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 6; + v6 := compiler.MapValueForKey(m, "consumes") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
repeated string produces = 7; + v7 := compiler.MapValueForKey(m, "produces") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Paths paths = 8; + v8 := compiler.MapValueForKey(m, "paths") + if v8 != nil { + var err error + x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) + if err != nil { + errors = append(errors, err) + } + } + // Definitions definitions = 9; + v9 := compiler.MapValueForKey(m, "definitions") + if v9 != nil { + var err error + x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // ParameterDefinitions parameters = 10; + v10 := compiler.MapValueForKey(m, "parameters") + if v10 != nil { + var err error + x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponseDefinitions responses = 11; + v11 := compiler.MapValueForKey(m, "responses") + if v11 != nil { + var err error + x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // SecurityDefinitions security_definitions = 13; + v13 := compiler.MapValueForKey(m, "securityDefinitions") + if v13 != nil { + var err error + x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Tag tags = 14; + v14 := compiler.MapValueForKey(m, "tags") + if v14 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := v14.([]interface{}) + if ok { + for _, item := range a { + y, err := NewTag(item, compiler.NewContext("tags", context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 15; + v15 := compiler.MapValueForKey(m, "externalDocs") + if v15 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 16; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + 
return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExamples creates an object of type Examples if possible, returning an error if not. +func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { + errors := make([]error, 0) + x := &Examples{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. +func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, 
compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. +func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { + errors := make([]error, 0) + x := &FileSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string format = 1; + v1 := compiler.MapValueForKey(m, "format") + if v1 != nil { + x.Format, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 2; + v2 := compiler.MapValueForKey(m, "title") + if v2 != nil { + x.Title, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 4; + v4 := compiler.MapValueForKey(m, "default") + if v4 != nil { + var err error + x.Default, err = NewAny(v4, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string required = 5; + v5 := compiler.MapValueForKey(m, "required") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [file] + if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 7; + v7 := compiler.MapValueForKey(m, "readOnly") + if v7 != nil { + x.ReadOnly, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has 
unexpected value for readOnly: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. +func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { + errors := make([]error, 0) + x := &FormDataParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [formData] + if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has 
unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array file] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = 
append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + 
x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeader creates an object of type Header if possible, returning an error if not. +func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 
!= nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = 
int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 18; + v18 := compiler.MapValueForKey(m, "description") + if v18 != nil { + x.Description, ok = v18.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 19; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. 
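+// Note: this is the header-flavored variant of the non-body parameter
+// sub-schemas. Compared with the formData variant above, its allowed keys
+// drop "allowEmptyValue", its "in" value must be "header", and its
+// collectionFormat enum omits "multi".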
+func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) { + errors := make([]error, 0) + x := &HeaderParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header] + if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, 
"collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + 
if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaders creates an object of type Headers if possible, returning an error if not. 
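+// Note: Headers models a YAML map from response-header name to Header. It is
+// decoded into a repeated list of NamedHeader name/value pairs, one pair per
+// key, in the order produced by compiler.UnpackMap.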
+func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { + errors := make([]error, 0) + x := &Headers{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeader additional_properties = 1; + // MAP: Header + x.AdditionalProperties = make([]*NamedHeader, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedHeader{} + pair.Name = k + var err error + pair.Value, err = NewHeader(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewInfo creates an object of type Info if possible, returning an error if not. +func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string version = 2; + v2 := compiler.MapValueForKey(m, "version") + if v2 != nil { + x.Version, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string terms_of_service = 4; + v4 := compiler.MapValueForKey(m, "termsOfService") + if v4 != nil { + x.TermsOfService, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Contact contact = 5; + v5 := compiler.MapValueForKey(m, "contact") + if v5 != nil { + var err error + x.Contact, err = NewContact(v5, compiler.NewContext("contact", context)) + if err != nil { + errors = append(errors, err) + } + } + // License license = 6; + v6 := compiler.MapValueForKey(m, "license") + if v6 != nil { + var err error + x.License, err = NewLicense(v6, 
compiler.NewContext("license", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. +func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) { + errors := make([]error, 0) + x := &ItemsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Schema = make([]*Schema, 0) + y, err := NewSchema(m, compiler.NewContext("", context)) + if err != nil { + return nil, err + } + x.Schema = append(x.Schema, y) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. +func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) { + errors := make([]error, 0) + x := &JsonReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"$ref", "description"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLicense creates an object of type License if possible, returning an error if not. 
+func NewLicense(in interface{}, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. 
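+// Note: the Named* family (NamedAny, NamedHeader, NamedParameter, and so on
+// below) all share this shape: a "name"/"value" pair carrying one entry of a
+// YAML map, with only the type of "value" differing between them.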
+func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { + errors := make([]error, 0) + x := &NamedAny{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewAny(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. +func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) { + errors := make([]error, 0) + x := &NamedHeader{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Header value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeader(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
+func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) { + errors := make([]error, 0) + x := &NamedParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Parameter value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameter(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. +func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
+func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) { + errors := make([]error, 0) + x := &NamedResponse{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Response value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponse(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. +func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) { + errors := make([]error, 0) + x := &NamedResponseValue{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseValue value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
+func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { + errors := make([]error, 0) + x := &NamedSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchema(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. +func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &NamedSecurityDefinitionsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecurityDefinitionsItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
+func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. +func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
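+// Note: NonBodyParameter is a oneof. The constructor tries each sub-schema
+// (header, formData, query, path) against the same map; a failed match is
+// recorded as an error, but if any alternative succeeds, "matched" is set and
+// all accumulated mismatch errors are discarded before returning.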
+func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { + errors := make([]error, 0) + x := &NonBodyParameter{} + matched := false + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // HeaderParameterSubSchema header_parameter_sub_schema = 1; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // QueryParameterSubSchema query_parameter_sub_schema = 3; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // PathParameterSubSchema path_parameter_sub_schema = 4; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
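+// Note: the four Oauth2*Security types below differ mainly in the permitted
+// "flow" enum value and in which URLs are required: accessCode requires both
+// authorizationUrl and tokenUrl, application and password require only
+// tokenUrl, and implicit requires only authorizationUrl.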
+func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { + errors := make([]error, 0) + x := &Oauth2AccessCodeSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [accessCode] + if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 5; + v5 := compiler.MapValueForKey(m, "tokenUrl") + if v5 != nil { + x.TokenUrl, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 6; + v6 := compiler.MapValueForKey(m, "description") + if v6 != nil { + x.Description, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := 
compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. +func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ApplicationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [application] + if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string 
description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. +func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ImplicitSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [implicit] + if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = 
NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. +func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { + errors := make([]error, 0) + x := &Oauth2PasswordSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [password] + if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. +func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { + errors := make([]error, 0) + x := &Oauth2Scopes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedString{} + pair.Name = k + // check the assertion so a non-string scope value becomes a reported error rather than a panic + value, ok := v.(string) + if ok { + pair.Value = value + } else { + message := fmt.Sprintf("has unexpected value for scope: %+v (%T)", v, v) + errors = append(errors, compiler.NewError(compiler.NewContext(k, context), message)) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. 
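+// Like the other constructors in this generated file, NewOperation enforces its required key ("responses") up front, rejects keys outside the allowed set (except x- extensions), and aggregates every problem it finds into one error group instead of stopping at the first.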
+func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := v1.([]interface{}) + if ok { + x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 6; + v6 := compiler.MapValueForKey(m, "produces") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 7; + v7 := compiler.MapValueForKey(m, "consumes") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParametersItem parameters = 8; + v8 := compiler.MapValueForKey(m, "parameters") + if v8 != nil { + // repeated ParametersItem + x.Parameters = 
make([]*ParametersItem, 0) + a, ok := v8.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // Responses responses = 9; + v9 := compiler.MapValueForKey(m, "responses") + if v9 != nil { + var err error + x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string schemes = 10; + v10 := compiler.MapValueForKey(m, "schemes") + if v10 != nil { + v, ok := v10.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 11; + v11 := compiler.MapValueForKey(m, "deprecated") + if v11 != nil { + x.Deprecated, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated NamedAny vendor_extension = 13; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. 
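+// Parameter is a oneof: the input is tried first as a BodyParameter and then as a NonBodyParameter; once a variant parses cleanly, the match errors accumulated along the way are discarded.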
+func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + matched := false + // BodyParameter body_parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_BodyParameter{BodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // NonBodyParameter non_body_parameter = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. +func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) { + errors := make([]error, 0) + x := &ParameterDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameter additional_properties = 1; + // MAP: Parameter + x.AdditionalProperties = make([]*NamedParameter, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedParameter{} + pair.Name = k + var err error + pair.Value, err = NewParameter(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
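+// ParametersItem is resolved the same way, by attempting Parameter and then JsonReference against the same input map.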
+func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { + errors := make([]error, 0) + x := &ParametersItem{} + matched := false + // Parameter parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewParameter(m, compiler.NewContext("parameter", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_Parameter{Parameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathItem creates an object of type PathItem if possible, returning an error if not. +func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { + errors := make([]error, 0) + x := &PathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Operation get = 2; + v2 := compiler.MapValueForKey(m, "get") + if v2 != nil { + var err error + x.Get, err = NewOperation(v2, compiler.NewContext("get", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation put = 3; + v3 := compiler.MapValueForKey(m, "put") + if v3 != nil { + var err error + x.Put, err = NewOperation(v3, compiler.NewContext("put", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation post = 4; + v4 := compiler.MapValueForKey(m, "post") + if v4 != nil { + var err error + x.Post, err = NewOperation(v4, compiler.NewContext("post", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation delete = 5; + v5 := compiler.MapValueForKey(m, "delete") + if v5 != nil { + var err error + x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation options = 6; + v6 := compiler.MapValueForKey(m, "options") + if v6 != nil { + var err error + x.Options, err = NewOperation(v6, compiler.NewContext("options", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation head = 7; + v7 := compiler.MapValueForKey(m, "head") + if v7 != nil { + var err error 
+ x.Head, err = NewOperation(v7, compiler.NewContext("head", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation patch = 8; + v8 := compiler.MapValueForKey(m, "patch") + if v8 != nil { + var err error + x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated ParametersItem parameters = 9; + v9 := compiler.MapValueForKey(m, "parameters") + if v9 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := v9.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. +func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { + errors := make([]error, 0) + x := &PathParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"required"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [path] + if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = 
v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = 
float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPaths creates an object of type Paths if possible, returning an error if not. +func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { + errors := make([]error, 0) + x := &Paths{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern0, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedAny vendor_extension = 1; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + // repeated NamedPathItem path = 2; + // MAP: PathItem ^/ + x.Path = make([]*NamedPathItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "/") { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
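+// PrimitivesItems carries the scalar item constraints; numeric bounds (maximum, minimum, multipleOf) accept any numeric type the YAML decoder produces and are widened to float64, while the integer limits (maxLength, maxItems, and friends) must decode as int.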
+func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { + errors := make([]error, 0) + x := &PrimitivesItems{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + 
if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + 
x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 18; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewProperties creates an object of type Properties if possible, returning an error if not. +func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) + x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
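+// Unlike the path variant, QueryParameterSubSchema has no required keys, additionally allows allowEmptyValue, and accepts "multi" as a collectionFormat.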
+func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { + errors := make([]error, 0) + x := &QueryParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [query] + if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + 
} + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; 
+ v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponse creates an object of type Response if possible, returning an error if not. 
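+// A Response requires only "description"; schema, headers, and examples are optional and are parsed into their own message types when present.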
+func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { + errors := make([]error, 0) + x := &Response{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"description"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "examples", "headers", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaItem schema = 2; + v2 := compiler.MapValueForKey(m, "schema") + if v2 != nil { + var err error + x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // Headers headers = 3; + v3 := compiler.MapValueForKey(m, "headers") + if v3 != nil { + var err error + x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) + if err != nil { + errors = append(errors, err) + } + } + // Examples examples = 4; + v4 := compiler.MapValueForKey(m, "examples") + if v4 != nil { + var err error + x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
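+// ResponseDefinitions is a plain name-to-Response map: every key is accepted and each value must parse as a Response.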
+func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) { + errors := make([]error, 0) + x := &ResponseDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedResponse additional_properties = 1; + // MAP: Response + x.AdditionalProperties = make([]*NamedResponse, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedResponse{} + pair.Name = k + var err error + pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. +func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) { + errors := make([]error, 0) + x := &ResponseValue{} + matched := false + // Response response = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewResponse(m, compiler.NewContext("response", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_Response{Response: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponses creates an object of type Responses if possible, returning an error if not. 
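+// Keys of a Responses map must be a three-digit status code or "default" (pattern2); x- keys become vendor extensions, and anything else is reported as invalid.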
+func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { + errors := make([]error, 0) + x := &Responses{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern2, pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedResponseValue response_code = 1; + // MAP: ResponseValue ^([0-9]{3})$|^(default)$ + x.ResponseCode = make([]*NamedResponseValue, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if pattern2.MatchString(k) { + pair := &NamedResponseValue{} + pair.Name = k + var err error + pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.ResponseCode = append(x.ResponseCode, pair) + } + } + } + // repeated NamedAny vendor_extension = 2; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchema creates an object of type Schema if possible, returning an error if not. 
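+// The numeric bounds below (multipleOf, maximum, minimum) accept any integer or floating scalar and widen it to float64, while the integer limits (maxLength, minLength, maxItems, minItems, maxProperties, minProperties) require a plain int.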
+func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { + errors := make([]error, 0) + x := &Schema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 3; + v3 := compiler.MapValueForKey(m, "title") + if v3 != nil { + x.Title, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float multiple_of = 6; + v6 := compiler.MapValueForKey(m, "multipleOf") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.MultipleOf = v6 + case float32: + x.MultipleOf = float64(v6) + case uint64: + x.MultipleOf = float64(v6) + case uint32: + x.MultipleOf = float64(v6) + case int64: + x.MultipleOf = float64(v6) + case int32: + x.MultipleOf = float64(v6) + case int: + x.MultipleOf = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float maximum = 7; + v7 := compiler.MapValueForKey(m, "maximum") + if v7 != nil { + switch v7 := v7.(type) { + case float64: + x.Maximum = v7 + case float32: + x.Maximum = float64(v7) + case uint64: + x.Maximum = float64(v7) + case uint32: + x.Maximum = float64(v7) + case int64: + x.Maximum = float64(v7) + case int32: + x.Maximum = float64(v7) + case int: + x.Maximum = float64(v7) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool 
exclusive_maximum = 8; + v8 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v8 != nil { + x.ExclusiveMaximum, ok = v8.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 9; + v9 := compiler.MapValueForKey(m, "minimum") + if v9 != nil { + switch v9 := v9.(type) { + case float64: + x.Minimum = v9 + case float32: + x.Minimum = float64(v9) + case uint64: + x.Minimum = float64(v9) + case uint32: + x.Minimum = float64(v9) + case int64: + x.Minimum = float64(v9) + case int32: + x.Minimum = float64(v9) + case int: + x.Minimum = float64(v9) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 10; + v10 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v10 != nil { + x.ExclusiveMinimum, ok = v10.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 11; + v11 := compiler.MapValueForKey(m, "maxLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 12; + v12 := compiler.MapValueForKey(m, "minLength") + if v12 != nil { + t, ok := v12.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 13; + v13 := compiler.MapValueForKey(m, "pattern") + if v13 != nil { + x.Pattern, ok = v13.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 14; + v14 := compiler.MapValueForKey(m, "maxItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 15; + v15 := compiler.MapValueForKey(m, "minItems") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 16; + v16 := compiler.MapValueForKey(m, "uniqueItems") + if v16 != nil { + x.UniqueItems, ok = v16.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_properties = 17; + v17 := compiler.MapValueForKey(m, "maxProperties") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_properties = 18; + v18 := compiler.MapValueForKey(m, "minProperties") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinProperties = int64(t) + } else { + message := fmt.Sprintf("has 
unexpected value for minProperties: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string required = 19; + v19 := compiler.MapValueForKey(m, "required") + if v19 != nil { + v, ok := v19.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // AdditionalPropertiesItem additional_properties = 21; + v21 := compiler.MapValueForKey(m, "additionalProperties") + if v21 != nil { + var err error + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) + if err != nil { + errors = append(errors, err) + } + } + // TypeItem type = 22; + v22 := compiler.MapValueForKey(m, "type") + if v22 != nil { + var err error + x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) + if err != nil { + errors = append(errors, err) + } + } + // ItemsItem items = 23; + v23 := compiler.MapValueForKey(m, "items") + if v23 != nil { + var err error + x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Schema all_of = 24; + v24 := compiler.MapValueForKey(m, "allOf") + if v24 != nil { + // repeated Schema + x.AllOf = make([]*Schema, 0) + a, ok := v24.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSchema(item, compiler.NewContext("allOf", context)) + if err != nil { + errors = append(errors, err) + } + x.AllOf = append(x.AllOf, y) + } + } + } + // Properties properties = 25; + v25 := compiler.MapValueForKey(m, "properties") + if v25 != nil { + var err error + x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) + if err != nil { + errors = append(errors, err) + } + } + // string discriminator = 26; + v26 := compiler.MapValueForKey(m, "discriminator") + if v26 != nil { + x.Discriminator, ok = v26.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 27; + v27 := compiler.MapValueForKey(m, "readOnly") + if v27 != nil { + x.ReadOnly, ok = v27.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Xml xml = 28; + v28 := compiler.MapValueForKey(m, "xml") + if v28 != nil { + var err error + x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) + if err != nil { + errors = append(errors, err) + } + } + // ExternalDocs external_docs = 29; + v29 := compiler.MapValueForKey(m, "externalDocs") + if v29 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 30; + v30 := compiler.MapValueForKey(m, "example") + if v30 != nil { + var err error + x.Example, err = NewAny(v30, compiler.NewContext("example", context)) + 
if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 31; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. +func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { + errors := make([]error, 0) + x := &SchemaItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // FileSchema file_schema = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_FileSchema{FileSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. +func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { + errors := make([]error, 0) + x := &SecurityDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSecurityDefinitionsItem additional_properties = 1; + // MAP: SecurityDefinitionsItem + x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSecurityDefinitionsItem{} + pair.Name = k + var err error + pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
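+// This is a oneof wrapper: each of the six security scheme subtypes is tried in turn, the first successful parse wins, and all per-subtype match errors are discarded once any branch matches.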
+func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &SecurityDefinitionsItem{} + matched := false + // BasicAuthenticationSecurity basic_authentication_security = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // ApiKeySecurity api_key_security = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ImplicitSecurity oauth2_implicit_security = 3; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2PasswordSecurity oauth2_password_security = 4; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ApplicationSecurity oauth2_application_security = 5; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
+func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { + errors := make([]error, 0) + x := &SecurityRequirement{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedStringArray additional_properties = 1; + // MAP: StringArray + x.AdditionalProperties = make([]*NamedStringArray, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedStringArray{} + pair.Name = k + var err error + pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewStringArray creates an object of type StringArray if possible, returning an error if not. +func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { + errors := make([]error, 0) + x := &StringArray{} + a, ok := in.([]interface{}) + if !ok { + message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Value = make([]string, 0) + for _, s := range a { + // check the element type instead of asserting it unconditionally, so a non-string element is reported as an error rather than panicking + value, ok := s.(string) + if ok { + x.Value = append(x.Value, value) + } else { + message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", s, s) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTag creates an object of type Tag if possible, returning an error if not. +func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { + errors := make([]error, 0) + x := &Tag{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "externalDocs", "name"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 3; + v3 := compiler.MapValueForKey(m, "externalDocs") + if v3 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := 
range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. +func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { + errors := make([]error, 0) + x := &TypeItem{} + switch in := in.(type) { + case string: + x.Value = make([]string, 0) + x.Value = append(x.Value, in) + case []interface{}: + x.Value = make([]string, 0) + for _, v := range in { + value, ok := v.(string) + if ok { + x.Value = append(x.Value, value) + } else { + // report the offending element v; after a failed assertion, value is only the zero string + message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", v, v) + errors = append(errors, compiler.NewError(context, message)) + } + } + default: + message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. +func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { + errors := make([]error, 0) + x := &VendorExtension{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewXml creates an object of type Xml if possible, returning an error if not. 
+func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { + errors := make([]error, 0) + x := &Xml{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string namespace = 2; + v2 := compiler.MapValueForKey(m, "namespace") + if v2 != nil { + x.Namespace, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string prefix = 3; + v3 := compiler.MapValueForKey(m, "prefix") + if v3 != nil { + x.Prefix, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool attribute = 4; + v4 := compiler.MapValueForKey(m, "attribute") + if v4 != nil { + x.Attribute, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool wrapped = 5; + v5 := compiler.MapValueForKey(m, "wrapped") + if v5 != nil { + x.Wrapped, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. +func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Any objects. 
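+// Any holds opaque YAML with nothing to dereference, so this resolver is an intentional no-op that exists only to satisfy the shared interface.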
+func (m *Any) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ApiKeySecurity objects. +func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. +func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BodyParameter objects. +func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Contact objects. +func (m *Contact) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Default objects. +func (m *Default) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Definitions objects. +func (m *Definitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Document objects. 
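+// Usage sketch (hedged; NewDocument is the top-level constructor defined earlier in this file, and "$root"/root are placeholder names): +//   doc, err := NewDocument(in, compiler.NewContext("$root", nil)) +//   if err == nil { _, err = doc.ResolveReferences(root) } // rewrites $ref nodes in place, reading targets relative to root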
+func (m *Document) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Info != nil { + _, err := m.Info.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Paths != nil { + _, err := m.Paths.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Definitions != nil { + _, err := m.Definitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.SecurityDefinitions != nil { + _, err := m.SecurityDefinitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Tags { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Examples objects. +func (m *Examples) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExternalDocs objects. +func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FileSchema objects. +func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Header objects. +func (m *Header) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. +func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Headers objects. +func (m *Headers) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Info objects. +func (m *Info) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Contact != nil { + _, err := m.Contact.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.License != nil { + _, err := m.License.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ItemsItem objects. 
+func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.Schema { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside JsonReference objects. +func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewJsonReference(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside License objects. +func (m *License) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedAny objects. +func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedHeader objects. +func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedParameter objects. +func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedPathItem objects. +func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponse objects. +func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponseValue objects. +func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSchema objects. 
+func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. +func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedString objects. +func (m *NamedString) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedStringArray objects. +func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NonBodyParameter objects. +func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) + if ok { + _, err := p.HeaderParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) + if ok { + _, err := p.FormDataParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) + if ok { + _, err := p.QueryParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) + if ok { + _, err := p.PathParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. +func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. +func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. +func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2Scopes objects. +func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Operation objects. +func (m *Operation) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Parameter objects. +func (m *Parameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*Parameter_BodyParameter) + if ok { + _, err := p.BodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*Parameter_NonBodyParameter) + if ok { + _, err := p.NonBodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParameterDefinitions objects. +func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParametersItem objects. 
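+// When the oneof holds a JsonReference, the referenced document is re-parsed as a ParametersItem and overwrites *m in place, so callers transparently see the resolved parameter.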
+func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ParametersItem_Parameter) + if ok { + _, err := p.Parameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ParametersItem_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewParametersItem(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathItem objects. +func (m *PathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewPathItem(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Get != nil { + _, err := m.Get.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Put != nil { + _, err := m.Put.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Post != nil { + _, err := m.Post.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Delete != nil { + _, err := m.Delete.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Options != nil { + _, err := m.Options.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Head != nil { + _, err := m.Head.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Patch != nil { + _, err := m.Patch.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathParameterSubSchema objects. +func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Paths objects. 
+func (m *Paths) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PrimitivesItems objects. +func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Properties objects. +func (m *Properties) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside QueryParameterSubSchema objects. +func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Response objects. +func (m *Response) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseDefinitions objects. 
+func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseValue objects. +func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ResponseValue_Response) + if ok { + _, err := p.Response.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ResponseValue_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewResponseValue(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Responses objects. +func (m *Responses) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.ResponseCode { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Schema objects. +func (m *Schema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewSchema(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.AdditionalProperties != nil { + _, err := m.AdditionalProperties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Type != nil { + _, err := m.Type.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.AllOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Properties != nil { + _, err := m.Properties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Xml != nil { + _, err := m.Xml.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = 
append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SchemaItem objects. +func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SchemaItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SchemaItem_FileSchema) + if ok { + _, err := p.FileSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitions objects. +func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitionsItem objects. +func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) + if ok { + _, err := p.BasicAuthenticationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity) + if ok { + _, err := p.ApiKeySecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity) + if ok { + _, err := p.Oauth2ImplicitSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity) + if ok { + _, err := p.Oauth2PasswordSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity) + if ok { + _, err := p.Oauth2ApplicationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) + if ok { + _, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityRequirement objects. +func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside StringArray objects. +func (m *StringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Tag objects. 
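+//
+// Editor's note (illustrative): Schema.ResolveReferences above is the one
+// place where resolution rewrites the receiver -- when XRef is set it reads
+// the referenced info, replaces *m wholesale, and restarts resolution on the
+// inlined copy. A hedged sketch, assuming "s" is a *Schema parsed from a
+// document named "root":
+//
+//	if _, err := s.ResolveReferences(root); err == nil {
+//		// s now holds the dereferenced schema body rather than a bare $ref
+//	}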
+func (m *Tag) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside TypeItem objects. +func (m *TypeItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside VendorExtension objects. +func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Xml objects. +func (m *Xml) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. +func (m *AdditionalPropertiesItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // AdditionalPropertiesItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return v1.Boolean + } + return nil +} + +// ToRawInfo returns a description of Any suitable for JSON or YAML export. +func (m *Any) ToRawInfo() interface{} { + var err error + var info1 []yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info1) + if err == nil { + return info1 + } + var info2 yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info2) + if err == nil { + return info2 + } + var info3 interface{} + err = yaml.Unmarshal([]byte(m.Yaml), &info3) + if err == nil { + return info3 + } + return nil +} + +// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. +func (m *ApiKeySecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. 
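+//
+// Editor's note (illustrative): the vendor-extension values exported by the
+// ToRawInfo methods below round-trip through Any.ToRawInfo above, which
+// re-parses the stored YAML by trying []yaml.MapSlice, then yaml.MapSlice,
+// then a bare interface{}, and returns nil when none of the three parses
+// succeed -- so a corrupted extension silently disappears from the export.
+//
+//	// e.g. a scalar extension value:
+//	a := &Any{Yaml: "true"}
+//	_ = a.ToRawInfo() // falls through to the interface{} parse -> true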
+func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. +func (m *BodyParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.Schema != nil { + info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + } + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Contact suitable for JSON or YAML export. +func (m *Contact) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{"url", m.Url}) + } + if m.Email != "" { + info = append(info, yaml.MapItem{"email", m.Email}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Default suitable for JSON or YAML export. +func (m *Default) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. +func (m *Definitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Document suitable for JSON or YAML export. 
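+//
+// Editor's note (illustrative sketch, not generated code): the usual export
+// path is to resolve references first and then marshal the raw form; "yaml"
+// is assumed to be gopkg.in/yaml.v2, whose Marshal understands the ordered
+// yaml.MapSlice values built here:
+//
+//	if _, err := document.ResolveReferences(sourceName); err != nil {
+//		return err
+//	}
+//	bytes, err := yaml.Marshal(document.ToRawInfo())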
+func (m *Document) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Swagger != "" { + info = append(info, yaml.MapItem{"swagger", m.Swagger}) + } + if m.Info != nil { + info = append(info, yaml.MapItem{"info", m.Info.ToRawInfo()}) + } + // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Host != "" { + info = append(info, yaml.MapItem{"host", m.Host}) + } + if m.BasePath != "" { + info = append(info, yaml.MapItem{"basePath", m.BasePath}) + } + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{"schemes", m.Schemes}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{"consumes", m.Consumes}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{"produces", m.Produces}) + } + if m.Paths != nil { + info = append(info, yaml.MapItem{"paths", m.Paths.ToRawInfo()}) + } + // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Definitions != nil { + info = append(info, yaml.MapItem{"definitions", m.Definitions.ToRawInfo()}) + } + // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Parameters != nil { + info = append(info, yaml.MapItem{"parameters", m.Parameters.ToRawInfo()}) + } + // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Responses != nil { + info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + } + // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"security", items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.SecurityDefinitions != nil { + info = append(info, yaml.MapItem{"securityDefinitions", m.SecurityDefinitions.ToRawInfo()}) + } + // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Tags) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Tags { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"tags", items}) + } + // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Examples suitable for JSON or YAML export. 
+func (m *Examples) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. +func (m *ExternalDocs) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{"url", m.Url}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. +func (m *FileSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Title != "" { + info = append(info, yaml.MapItem{"title", m.Title}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Required) != 0 { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.ReadOnly != false { + info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Example != nil { + info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + } + // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
+func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Header suitable for JSON or YAML export. 
+func (m *Header) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
+func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Headers suitable for JSON or YAML export. +func (m *Headers) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Info suitable for JSON or YAML export. 
+func (m *Info) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Title != "" { + info = append(info, yaml.MapItem{"title", m.Title}) + } + if m.Version != "" { + info = append(info, yaml.MapItem{"version", m.Version}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.TermsOfService != "" { + info = append(info, yaml.MapItem{"termsOfService", m.TermsOfService}) + } + if m.Contact != nil { + info = append(info, yaml.MapItem{"contact", m.Contact.ToRawInfo()}) + } + // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.License != nil { + info = append(info, yaml.MapItem{"license", m.License.ToRawInfo()}) + } + // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. +func (m *ItemsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if len(m.Schema) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Schema { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"schema", items}) + } + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. +func (m *JsonReference) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.XRef != "" { + info = append(info, yaml.MapItem{"$ref", m.XRef}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + return info +} + +// ToRawInfo returns a description of License suitable for JSON or YAML export. +func (m *License) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{"url", m.Url}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. +func (m *NamedAny) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. +func (m *NamedHeader) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. 
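+//
+// Editor's note: the Named* exporters in this stretch emit only the key; the
+// mapped value (see the trailing metadata comments) is not included. The
+// container types therefore flatten the name/value pairs themselves, as in:
+//
+//	for _, item := range m.AdditionalProperties {
+//		info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+//	}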
+func (m *NamedParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. +func (m *NamedPathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. +func (m *NamedResponse) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. +func (m *NamedResponseValue) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. +func (m *NamedSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. +func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. +func (m *NamedString) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Value != "" { + info = append(info, yaml.MapItem{"value", m.Value}) + } + return info +} + +// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. +func (m *NamedStringArray) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. 
+func (m *NonBodyParameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // NonBodyParameter + // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetHeaderParameterSubSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFormDataParameterSubSchema() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetQueryParameterSubSchema() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetPathParameterSubSchema() + if v3 != nil { + return v3.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. +func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.AuthorizationUrl != "" { + info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + } + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. +func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. 
+func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.AuthorizationUrl != "" { + info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. +func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. +func (m *Oauth2Scopes) ToRawInfo() interface{} { + info := yaml.MapSlice{} + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Operation suitable for JSON or YAML export. 
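+//
+// Editor's note: as in the other ToRawInfo methods here, fields equal to
+// their Go zero value are skipped, so an explicit "deprecated: false" or
+// "minimum: 0" in the source document is not round-tripped:
+//
+//	op := &Operation{Deprecated: false}
+//	_ = op.ToRawInfo() // the result carries no "deprecated" entry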
+func (m *Operation) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if len(m.Tags) != 0 { + info = append(info, yaml.MapItem{"tags", m.Tags}) + } + if m.Summary != "" { + info = append(info, yaml.MapItem{"summary", m.Summary}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.OperationId != "" { + info = append(info, yaml.MapItem{"operationId", m.OperationId}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{"produces", m.Produces}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{"consumes", m.Consumes}) + } + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"parameters", items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + if m.Responses != nil { + info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + } + // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{"schemes", m.Schemes}) + } + if m.Deprecated != false { + info = append(info, yaml.MapItem{"deprecated", m.Deprecated}) + } + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"security", items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. +func (m *Parameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // Parameter + // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBodyParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetNonBodyParameter() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. +func (m *ParameterDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. 
+func (m *ParametersItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ParametersItem + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. +func (m *PathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.XRef != "" { + info = append(info, yaml.MapItem{"$ref", m.XRef}) + } + if m.Get != nil { + info = append(info, yaml.MapItem{"get", m.Get.ToRawInfo()}) + } + // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Put != nil { + info = append(info, yaml.MapItem{"put", m.Put.ToRawInfo()}) + } + // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Post != nil { + info = append(info, yaml.MapItem{"post", m.Post.ToRawInfo()}) + } + // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Delete != nil { + info = append(info, yaml.MapItem{"delete", m.Delete.ToRawInfo()}) + } + // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Options != nil { + info = append(info, yaml.MapItem{"options", m.Options.ToRawInfo()}) + } + // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Head != nil { + info = append(info, yaml.MapItem{"head", m.Head.ToRawInfo()}) + } + // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Patch != nil { + info = append(info, yaml.MapItem{"patch", m.Patch.ToRawInfo()}) + } + // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"parameters", items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. 
+func (m *PathParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Paths suitable for JSON or YAML export. +func (m *Paths) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + if m.Path != nil { + for _, item := range m.Path { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. 
+func (m *PrimitivesItems) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Properties suitable for JSON or YAML export. +func (m *Properties) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. 
+func (m *QueryParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Response suitable for JSON or YAML export. 
+func (m *Response) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Schema != nil { + info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + } + // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Headers != nil { + info = append(info, yaml.MapItem{"headers", m.Headers.ToRawInfo()}) + } + // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Examples != nil { + info = append(info, yaml.MapItem{"examples", m.Examples.ToRawInfo()}) + } + // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. +func (m *ResponseDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. +func (m *ResponseValue) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ResponseValue + // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetResponse() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Responses suitable for JSON or YAML export. +func (m *Responses) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.ResponseCode != nil { + for _, item := range m.ResponseCode { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Schema suitable for JSON or YAML export. 
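+//
+// Editor's note: two quirks of the Schema export below are worth flagging --
+// "type" collapses a single-element TypeItem to a scalar, and "items"
+// exports only the first schema (items[0]) when ItemsItem holds several,
+// which matches the common single-schema case but drops any extras.
+//
+//	s := &Schema{Type: &TypeItem{Value: []string{"string"}}}
+//	_ = s.ToRawInfo() // yields type: string, not type: [string]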
+func (m *Schema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.XRef != "" { + info = append(info, yaml.MapItem{"$ref", m.XRef}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Title != "" { + info = append(info, yaml.MapItem{"title", m.Title}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if m.MaxProperties != 0 { + info = append(info, yaml.MapItem{"maxProperties", m.MaxProperties}) + } + if m.MinProperties != 0 { + info = append(info, yaml.MapItem{"minProperties", m.MinProperties}) + } + if len(m.Required) != 0 { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.AdditionalProperties != nil { + info = append(info, yaml.MapItem{"additionalProperties", m.AdditionalProperties.ToRawInfo()}) + } + // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Type != nil { + if len(m.Type.Value) == 1 { + info = append(info, yaml.MapItem{"type", m.Type.Value[0]}) + } else { + info = append(info, yaml.MapItem{"type", m.Type.Value}) + } + } + // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Items != nil { + items := make([]interface{}, 0) + for _, item := range m.Items.Schema { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"items", items[0]}) + } + // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.AllOf) != 0 { + items := make([]interface{}, 0) + for _, item := range m.AllOf { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"allOf", items}) + } + // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.Properties != nil { + info = append(info, yaml.MapItem{"properties", m.Properties.ToRawInfo()}) + } 
+ // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Discriminator != "" { + info = append(info, yaml.MapItem{"discriminator", m.Discriminator}) + } + if m.ReadOnly != false { + info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + } + if m.Xml != nil { + info = append(info, yaml.MapItem{"xml", m.Xml.ToRawInfo()}) + } + // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Example != nil { + info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + } + // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. +func (m *SchemaItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SchemaItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFileSchema() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. +func (m *SecurityDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. 
+func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SecurityDefinitionsItem + // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBasicAuthenticationSecurity() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetApiKeySecurity() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetOauth2ImplicitSecurity() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetOauth2PasswordSecurity() + if v3 != nil { + return v3.ToRawInfo() + } + // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v4 := m.GetOauth2ApplicationSecurity() + if v4 != nil { + return v4.ToRawInfo() + } + // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v5 := m.GetOauth2AccessCodeSecurity() + if v5 != nil { + return v5.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. +func (m *SecurityRequirement) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. +func (m *StringArray) ToRawInfo() interface{} { + return m.Value +} + +// ToRawInfo returns a description of Tag suitable for JSON or YAML export. +func (m *Tag) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. +func (m *TypeItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if len(m.Value) != 0 { + info = append(info, yaml.MapItem{"value", m.Value}) + } + return info +} + +// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. 
+func (m *VendorExtension) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Xml suitable for JSON or YAML export. +func (m *Xml) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Namespace != "" { + info = append(info, yaml.MapItem{"namespace", m.Namespace}) + } + if m.Prefix != "" { + info = append(info, yaml.MapItem{"prefix", m.Prefix}) + } + if m.Attribute != false { + info = append(info, yaml.MapItem{"attribute", m.Attribute}) + } + if m.Wrapped != false { + info = append(info, yaml.MapItem{"wrapped", m.Wrapped}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +var ( + pattern0 = regexp.MustCompile("^x-") + pattern1 = regexp.MustCompile("^/") + pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") +) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go new file mode 100644 index 00000000..37da7df2 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -0,0 +1,4456 @@ +// Code generated by protoc-gen-go. +// source: OpenAPIv2/OpenAPIv2.proto +// DO NOT EDIT! + +/* +Package openapi_v2 is a generated protocol buffer package. + +It is generated from these files: + OpenAPIv2/OpenAPIv2.proto + +It has these top-level messages: + AdditionalPropertiesItem + Any + ApiKeySecurity + BasicAuthenticationSecurity + BodyParameter + Contact + Default + Definitions + Document + Examples + ExternalDocs + FileSchema + FormDataParameterSubSchema + Header + HeaderParameterSubSchema + Headers + Info + ItemsItem + JsonReference + License + NamedAny + NamedHeader + NamedParameter + NamedPathItem + NamedResponse + NamedResponseValue + NamedSchema + NamedSecurityDefinitionsItem + NamedString + NamedStringArray + NonBodyParameter + Oauth2AccessCodeSecurity + Oauth2ApplicationSecurity + Oauth2ImplicitSecurity + Oauth2PasswordSecurity + Oauth2Scopes + Operation + Parameter + ParameterDefinitions + ParametersItem + PathItem + PathParameterSubSchema + Paths + PrimitivesItems + Properties + QueryParameterSubSchema + Response + ResponseDefinitions + ResponseValue + Responses + Schema + SchemaItem + SecurityDefinitions + SecurityDefinitionsItem + SecurityRequirement + StringArray + Tag + TypeItem + VendorExtension + Xml +*/ +package openapi_v2 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AdditionalPropertiesItem struct { + // Types that are valid to be assigned to Oneof: + // *AdditionalPropertiesItem_Schema + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } +func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } +func (*AdditionalPropertiesItem) ProtoMessage() {} +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` +} + +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *AdditionalPropertiesItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } +} + +func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *AdditionalPropertiesItem_Boolean: + t := uint64(0) + if x.Boolean { + t = 1 + } + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AdditionalPropertiesItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &AdditionalPropertiesItem_Schema{msg} + return true, err + case 2: // oneof.boolean + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} + return true, err + default: + return false, nil + } +} + +func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: 
+ s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AdditionalPropertiesItem_Boolean: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Any struct { + Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Any) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +func (m *Any) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +type ApiKeySecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } +func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } +func (*ApiKeySecurity) ProtoMessage() {} +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ApiKeySecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ApiKeySecurity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ApiKeySecurity) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *ApiKeySecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BasicAuthenticationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } +func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } +func (*BasicAuthenticationSecurity) ProtoMessage() {} +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *BasicAuthenticationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BodyParameter struct { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BodyParameter) Reset() { *m = BodyParameter{} } +func (m *BodyParameter) String() string { return proto.CompactTextString(m) } +func (*BodyParameter) ProtoMessage() {} +func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *BodyParameter) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BodyParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BodyParameter) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *BodyParameter) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *BodyParameter) GetSchema() *Schema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *BodyParameter) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Contact information for the owners of the API. +type Contact struct { + // The identifying name of the contact person/organization. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the contact information. + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + // The email address of the contact person/organization. + Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Contact) Reset() { *m = Contact{} } +func (m *Contact) String() string { return proto.CompactTextString(m) } +func (*Contact) ProtoMessage() {} +func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Contact) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Contact) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *Contact) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *Contact) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Default struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Default) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. 
+type Definitions struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Definitions) Reset() { *m = Definitions{} } +func (m *Definitions) String() string { return proto.CompactTextString(m) } +func (*Definitions) ProtoMessage() {} +func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Definitions) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Document struct { + // The Swagger version of this document. + Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + // The host (name or ip) of the API. Example: 'swagger.io' + Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` + // The base path to the API. Example: '/api'. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` + // A list of MIME types accepted by the API. + Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *Document) GetSwagger() string { + if m != nil { + return m.Swagger + } + return "" +} + +func (m *Document) GetInfo() *Info { + if m != nil { + return m.Info + } + return nil +} + +func (m *Document) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Document) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +func (m *Document) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Document) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Document) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Document) GetPaths() *Paths { + if m != nil { + return m.Paths + } + return nil +} + +func (m *Document) GetDefinitions() *Definitions { + if m != nil { + return m.Definitions + } 
+ return nil +} + +func (m *Document) GetParameters() *ParameterDefinitions { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Document) GetResponses() *ResponseDefinitions { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Document) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { + if m != nil { + return m.SecurityDefinitions + } + return nil +} + +func (m *Document) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Document) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Document) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Examples struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Examples) Reset() { *m = Examples{} } +func (m *Examples) String() string { return proto.CompactTextString(m) } +func (*Examples) ProtoMessage() {} +func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *Examples) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// information about external documentation +type ExternalDocs struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } +func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } +func (*ExternalDocs) ProtoMessage() {} +func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ExternalDocs) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ExternalDocs) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ExternalDocs) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. 
+type FileSchema struct { + Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FileSchema) Reset() { *m = FileSchema{} } +func (m *FileSchema) String() string { return proto.CompactTextString(m) } +func (*FileSchema) ProtoMessage() {} +func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *FileSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FileSchema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *FileSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FileSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FileSchema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *FileSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FileSchema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +func (m *FileSchema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *FileSchema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *FileSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type FormDataParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. 
+ AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } +func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*FormDataParameterSubSchema) ProtoMessage() {} +func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *FormDataParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *FormDataParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FormDataParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *FormDataParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FormDataParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *FormDataParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 
+} + +func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *FormDataParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *FormDataParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Header struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *Header) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Header) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Header) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *Header) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *Header) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Header) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Header) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Header) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Header) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Header) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Header) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Header) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Header) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Header) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Header) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Header) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Header) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Header) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Header) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type HeaderParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } +func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*HeaderParameterSubSchema) ProtoMessage() {} +func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *HeaderParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *HeaderParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *HeaderParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *HeaderParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *HeaderParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *HeaderParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMinimum() float64 { + if m != nil { + 
return m.Minimum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *HeaderParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *HeaderParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Headers struct { + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Headers) Reset() { *m = Headers{} } +func (m *Headers) String() string { return proto.CompactTextString(m) } +func (*Headers) ProtoMessage() {} +func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *Headers) GetAdditionalProperties() []*NamedHeader { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// General information about the API. +type Info struct { + // A unique and precise title of the API. + Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` + // A semantic version number of the API. + Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The terms of service for the API. 
+ TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *Info) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Info) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Info) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Info) GetTermsOfService() string { + if m != nil { + return m.TermsOfService + } + return "" +} + +func (m *Info) GetContact() *Contact { + if m != nil { + return m.Contact + } + return nil +} + +func (m *Info) GetLicense() *License { + if m != nil { + return m.License + } + return nil +} + +func (m *Info) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type ItemsItem struct { + Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"` +} + +func (m *ItemsItem) Reset() { *m = ItemsItem{} } +func (m *ItemsItem) String() string { return proto.CompactTextString(m) } +func (*ItemsItem) ProtoMessage() {} +func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *ItemsItem) GetSchema() []*Schema { + if m != nil { + return m.Schema + } + return nil +} + +type JsonReference struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *JsonReference) Reset() { *m = JsonReference{} } +func (m *JsonReference) String() string { return proto.CompactTextString(m) } +func (*JsonReference) ProtoMessage() {} +func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *JsonReference) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *JsonReference) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type License struct { + // The name of the license type. It's encouraged to use an OSI compatible license. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the license. 
+ Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *License) Reset() { *m = License{} } +func (m *License) String() string { return proto.CompactTextString(m) } +func (*License) ProtoMessage() {} +func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *License) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *License) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *License) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +type NamedAny struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedAny) Reset() { *m = NamedAny{} } +func (m *NamedAny) String() string { return proto.CompactTextString(m) } +func (*NamedAny) ProtoMessage() {} +func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *NamedAny) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedAny) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +type NamedHeader struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedHeader) Reset() { *m = NamedHeader{} } +func (m *NamedHeader) String() string { return proto.CompactTextString(m) } +func (*NamedHeader) ProtoMessage() {} +func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *NamedHeader) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedHeader) GetValue() *Header { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +type NamedParameter struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedParameter) Reset() { *m = NamedParameter{} } +func (m *NamedParameter) String() string { return proto.CompactTextString(m) } +func (*NamedParameter) ProtoMessage() {} +func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *NamedParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedParameter) GetValue() *Parameter { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+type NamedPathItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } +func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } +func (*NamedPathItem) ProtoMessage() {} +func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *NamedPathItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedPathItem) GetValue() *PathItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +type NamedResponse struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponse) Reset() { *m = NamedResponse{} } +func (m *NamedResponse) String() string { return proto.CompactTextString(m) } +func (*NamedResponse) ProtoMessage() {} +func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *NamedResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponse) GetValue() *Response { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +type NamedResponseValue struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } +func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } +func (*NamedResponseValue) ProtoMessage() {} +func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *NamedResponseValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponseValue) GetValue() *ResponseValue { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +type NamedSchema struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSchema) Reset() { *m = NamedSchema{} } +func (m *NamedSchema) String() string { return proto.CompactTextString(m) } +func (*NamedSchema) ProtoMessage() {} +func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *NamedSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSchema) GetValue() *Schema { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
+type NamedSecurityDefinitionsItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } +func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *NamedSecurityDefinitionsItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +type NamedString struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedString) Reset() { *m = NamedString{} } +func (m *NamedString) String() string { return proto.CompactTextString(m) } +func (*NamedString) ProtoMessage() {} +func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *NamedString) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +type NamedStringArray struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } +func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } +func (*NamedStringArray) ProtoMessage() {} +func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *NamedStringArray) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedStringArray) GetValue() *StringArray { + if m != nil { + return m.Value + } + return nil +} + +type NonBodyParameter struct { + // Types that are valid to be assigned to Oneof: + // *NonBodyParameter_HeaderParameterSubSchema + // *NonBodyParameter_FormDataParameterSubSchema + // *NonBodyParameter_QueryParameterSubSchema + // *NonBodyParameter_PathParameterSubSchema + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } +func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } +func (*NonBodyParameter) ProtoMessage() {} +func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() +} + +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"` +} +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"` +} +type 
NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"` +} +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"` +} + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { + return x.HeaderParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { + return x.FormDataParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { + return x.QueryParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { + return x.PathParameterSubSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } +} + +func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_FormDataParameterSubSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_QueryParameterSubSchema: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_PathParameterSubSchema: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NonBodyParameter) + switch tag { + case 1: // oneof.header_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HeaderParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg} + return true, err + case 2: // oneof.form_data_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FormDataParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg} + return true, err + case 3: // oneof.query_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg} + return true, err + case 4: // oneof.path_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PathParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg} + return true, err + default: + return false, nil + } +} + +func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + s := proto.Size(x.HeaderParameterSubSchema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_FormDataParameterSubSchema: + s := proto.Size(x.FormDataParameterSubSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_QueryParameterSubSchema: + s := proto.Size(x.QueryParameterSubSchema) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s
+ case *NonBodyParameter_PathParameterSubSchema: + s := proto.Size(x.PathParameterSubSchema) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oauth2AccessCodeSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } +func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *Oauth2AccessCodeSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ApplicationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } +func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ApplicationSecurity) ProtoMessage() {} +func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *Oauth2ApplicationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +}
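
The hand-rolled marshaler and sizer above both compute each oneof variant's wire key as field_number<<3 | wire_type, ORed with proto.WireBytes (wire type 2) because every variant is a length-delimited embedded message. A minimal standalone sketch of that arithmetic, with the wire-type constant inlined as an assumption rather than imported:

```go
package main

import "fmt"

func main() {
	// Wire type 2 (length-delimited) is what proto.WireBytes denotes; the
	// generated code ORs it with the shifted field number to form the key.
	const wireBytes = 2
	for field := uint64(1); field <= 4; field++ {
		fmt.Printf("field %d -> key 0x%02x\n", field, field<<3|wireBytes)
	}
	// Prints 0x0a, 0x12, 0x1a, 0x22: the one-byte keys the sizer accounts
	// for with n += proto.SizeVarint(field<<3 | proto.WireBytes).
}
```
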
+ +func (m *Oauth2ApplicationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ImplicitSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } +func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ImplicitSecurity) ProtoMessage() {} +func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *Oauth2ImplicitSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2PasswordSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } +func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2PasswordSecurity) ProtoMessage() {} +func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *Oauth2PasswordSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2PasswordSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2Scopes struct { + 
AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } +func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } +func (*Oauth2Scopes) ProtoMessage() {} +func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Operation struct { + Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` + // A brief summary of the operation. + Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + // A longer description of the operation, GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + // A unique identifier of the operation. + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` + // A list of MIME types the API can consume. + Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` + // The parameters needed to send a valid API call. + Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` + Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *Operation) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Operation) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Operation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Operation) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Operation) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *Operation) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Operation) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Operation) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Operation) GetResponses() *Responses { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Operation) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Operation) GetDeprecated() bool { + if m != nil { + 
return m.Deprecated + } + return false +} + +func (m *Operation) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Operation) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Parameter struct { + // Types that are valid to be assigned to Oneof: + // *Parameter_BodyParameter + // *Parameter_NonBodyParameter + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *Parameter) Reset() { *m = Parameter{} } +func (m *Parameter) String() string { return proto.CompactTextString(m) } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type isParameter_Oneof interface { + isParameter_Oneof() +} + +type Parameter_BodyParameter struct { + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` +} +type Parameter_NonBodyParameter struct { + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` +} + +func (*Parameter_BodyParameter) isParameter_Oneof() {} +func (*Parameter_NonBodyParameter) isParameter_Oneof() {} + +func (m *Parameter) GetOneof() isParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *Parameter) GetBodyParameter() *BodyParameter { + if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { + return x.BodyParameter + } + return nil +} + +func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { + if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { + return x.NonBodyParameter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } +} + +func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BodyParameter); err != nil { + return err + } + case *Parameter_NonBodyParameter: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NonBodyParameter); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Parameter) + switch tag { + case 1: // oneof.body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_BodyParameter{msg} + return true, err + case 2: // oneof.non_body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NonBodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_NonBodyParameter{msg} + return true, err + default: + return false, nil + } +} + +func _Parameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + s := proto.Size(x.BodyParameter) + n += proto.SizeVarint(1<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Parameter_NonBodyParameter: + s := proto.Size(x.NonBodyParameter) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// One or more JSON representations for parameters +type ParameterDefinitions struct { + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } +func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } +func (*ParameterDefinitions) ProtoMessage() {} +func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ParametersItem struct { + // Types that are valid to be assigned to Oneof: + // *ParametersItem_Parameter + // *ParametersItem_JsonReference + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ParametersItem) Reset() { *m = ParametersItem{} } +func (m *ParametersItem) String() string { return proto.CompactTextString(m) } +func (*ParametersItem) ProtoMessage() {} +func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type isParametersItem_Oneof interface { + isParametersItem_Oneof() +} + +type ParametersItem_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` +} +type ParametersItem_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} +func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} + +func (m *ParametersItem) GetOneof() isParametersItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ParametersItem) GetParameter() *Parameter { + if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { + return x.Parameter + } + return nil +} + +func (m *ParametersItem) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } +} + +func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Parameter); err != nil { + return err + } + case *ParametersItem_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ParametersItem) + switch tag { + case 1: // oneof.parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Parameter) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_Parameter{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ParametersItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + s := proto.Size(x.Parameter) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ParametersItem_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type PathItem struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` + // The parameters needed to send a valid API call. 
+ Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathItem) Reset() { *m = PathItem{} } +func (m *PathItem) String() string { return proto.CompactTextString(m) } +func (*PathItem) ProtoMessage() {} +func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *PathItem) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *PathItem) GetGet() *Operation { + if m != nil { + return m.Get + } + return nil +} + +func (m *PathItem) GetPut() *Operation { + if m != nil { + return m.Put + } + return nil +} + +func (m *PathItem) GetPost() *Operation { + if m != nil { + return m.Post + } + return nil +} + +func (m *PathItem) GetDelete() *Operation { + if m != nil { + return m.Delete + } + return nil +} + +func (m *PathItem) GetOptions() *Operation { + if m != nil { + return m.Options + } + return nil +} + +func (m *PathItem) GetHead() *Operation { + if m != nil { + return m.Head + } + return nil +} + +func (m *PathItem) GetPatch() *Operation { + if m != nil { + return m.Patch + } + return nil +} + +func (m *PathItem) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *PathItem) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type PathParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } +func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*PathParameterSubSchema) ProtoMessage() {} +func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *PathParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *PathParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *PathParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PathParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PathParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PathParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PathParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PathParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PathParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PathParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + 
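
Every generated accessor in this file opens with an `if m != nil` guard, so getters can be called, and even chained, on unset messages without panicking. A short sketch, assuming the upstream gnostic import path for this vendored package (adjust to the actual vendor directory):

```go
package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" // assumed import path
)

func main() {
	var p *openapi_v2.PathParameterSubSchema // deliberately nil

	// Each getter checks its receiver before dereferencing, so these
	// return zero values rather than panicking on the nil pointer.
	fmt.Println(p.GetName())    // ""
	fmt.Println(p.GetMaximum()) // 0
	fmt.Println(p.GetItems())   // <nil>
}
```
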
+func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PathParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PathParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PathParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PathParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +type Paths struct { + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` +} + +func (m *Paths) Reset() { *m = Paths{} } +func (m *Paths) String() string { return proto.CompactTextString(m) } +func (*Paths) ProtoMessage() {} +func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func (m *Paths) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func (m *Paths) GetPath() []*NamedPathItem { + if m != nil { + return m.Path + } + return nil +} + +type PrimitivesItems struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } +func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } +func (*PrimitivesItems) ProtoMessage() {} +func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + +func (m *PrimitivesItems) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PrimitivesItems) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PrimitivesItems) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PrimitivesItems) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PrimitivesItems) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PrimitivesItems) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PrimitivesItems) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PrimitivesItems) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PrimitivesItems) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PrimitivesItems) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PrimitivesItems) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PrimitivesItems) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PrimitivesItems) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PrimitivesItems) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PrimitivesItems) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Properties struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Properties) Reset() { *m = Properties{} } +func (m *Properties) String() string { return proto.CompactTextString(m) } +func (*Properties) ProtoMessage() {} +func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } + +func (m *Properties) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type QueryParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } +func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*QueryParameterSubSchema) ProtoMessage() {} +func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + +func (m *QueryParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *QueryParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *QueryParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *QueryParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *QueryParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *QueryParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *QueryParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + 
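
The Named* wrappers (NamedString earlier, NamedParameter inside ParameterDefinitions, and so on) stand in for protobuf maps: each JSON object is represented as an ordered list of (name, value) pairs. A sketch of collapsing Oauth2Scopes back into an ordinary Go map; `scopesToMap` is a hypothetical helper and the import path is assumed as before:

```go
package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" // assumed import path
)

// scopesToMap flattens the ordered (name, value) pairs into a Go map,
// discarding the ordering that the generated representation preserves.
func scopesToMap(s *openapi_v2.Oauth2Scopes) map[string]string {
	out := make(map[string]string, len(s.GetAdditionalProperties()))
	for _, ns := range s.GetAdditionalProperties() {
		out[ns.GetName()] = ns.GetValue()
	}
	return out
}

func main() {
	scopes := &openapi_v2.Oauth2Scopes{
		AdditionalProperties: []*openapi_v2.NamedString{
			{Name: "read", Value: "grants read access"},
		},
	}
	fmt.Println(scopesToMap(scopes)) // map[read:grants read access]
}
```

Keep the original slice rather than the map if the document needs to round-trip, since the pair order is part of what this representation preserves.
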
+func (m *QueryParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *QueryParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *QueryParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *QueryParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *QueryParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *QueryParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *QueryParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Response struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } + +func (m *Response) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Response) GetSchema() *SchemaItem { + if m != nil { + return m.Schema + } + return nil +} + +func (m *Response) GetHeaders() *Headers { + if m != nil { + return m.Headers + } + return nil +} + +func (m *Response) GetExamples() *Examples { + if m != nil { + return m.Examples + } + return nil +} + +func (m *Response) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// One or more JSON representations for parameters +type ResponseDefinitions struct { + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } +func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } +func (*ResponseDefinitions) ProtoMessage() {} +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } + +func (m *ResponseDefinitions) GetAdditionalProperties() 
[]*NamedResponse { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ResponseValue struct { + // Types that are valid to be assigned to Oneof: + // *ResponseValue_Response + // *ResponseValue_JsonReference + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ResponseValue) Reset() { *m = ResponseValue{} } +func (m *ResponseValue) String() string { return proto.CompactTextString(m) } +func (*ResponseValue) ProtoMessage() {} +func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } + +type isResponseValue_Oneof interface { + isResponseValue_Oneof() +} + +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` +} +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ResponseValue_Response) isResponseValue_Oneof() {} +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +func (m *ResponseValue) GetOneof() isResponseValue_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ResponseValue) GetResponse() *Response { + if x, ok := m.GetOneof().(*ResponseValue_Response); ok { + return x.Response + } + return nil +} + +func (m *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } +} + +func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Response); err != nil { + return err + } + case *ResponseValue_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) + } + return nil +} + +func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseValue) + switch tag { + case 1: // oneof.response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Response) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_Response{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ResponseValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + s := proto.Size(x.Response) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseValue_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Response objects names can either be any valid HTTP status code or 'default'. +type Responses struct { + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Responses) Reset() { *m = Responses{} } +func (m *Responses) String() string { return proto.CompactTextString(m) } +func (*Responses) ProtoMessage() {} +func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } + +func (m *Responses) GetResponseCode() []*NamedResponseValue { + if m != nil { + return m.ResponseCode + } + return nil +} + +func (m *Responses) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type Schema struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` + Discriminator string 
`protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + +func (m *Schema) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *Schema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Schema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Schema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Schema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Schema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Schema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Schema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Schema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Schema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Schema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Schema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Schema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Schema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Schema) GetMaxProperties() int64 { + if m != nil { + return m.MaxProperties + } + return 0 +} + +func (m *Schema) GetMinProperties() int64 { + if m != nil { + return m.MinProperties + } + return 0 +} + +func (m *Schema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *Schema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +func (m *Schema) GetType() *TypeItem { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema) GetItems() *ItemsItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *Schema) GetAllOf() []*Schema { + if m != nil { + return m.AllOf + } + return nil +} + +func (m *Schema) GetProperties() *Properties { + if m != nil { + return m.Properties + } + return nil +} + +func (m *Schema) GetDiscriminator() string { + if m != nil { + return m.Discriminator + } + return "" +} + +func (m *Schema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + 
+func (m *Schema) GetXml() *Xml { + if m != nil { + return m.Xml + } + return nil +} + +func (m *Schema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Schema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *Schema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type SchemaItem struct { + // Types that are valid to be assigned to Oneof: + // *SchemaItem_Schema + // *SchemaItem_FileSchema + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SchemaItem) Reset() { *m = SchemaItem{} } +func (m *SchemaItem) String() string { return proto.CompactTextString(m) } +func (*SchemaItem) ProtoMessage() {} +func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } + +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() +} + +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` +} + +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SchemaItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { + return x.FileSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } +} + +func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *SchemaItem_FileSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SchemaItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_Schema{msg} + return true, err + case 2: // oneof.file_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileSchema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_FileSchema{msg} + return true, err + default: + return false, nil + } +} + +func _SchemaItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*SchemaItem_FileSchema: + s := proto.Size(x.FileSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityDefinitions struct { + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } +func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitions) ProtoMessage() {} +func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } + +func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type SecurityDefinitionsItem struct { + // Types that are valid to be assigned to Oneof: + // *SecurityDefinitionsItem_BasicAuthenticationSecurity + // *SecurityDefinitionsItem_ApiKeySecurity + // *SecurityDefinitionsItem_Oauth2ImplicitSecurity + // *SecurityDefinitionsItem_Oauth2PasswordSecurity + // *SecurityDefinitionsItem_Oauth2ApplicationSecurity + // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } +func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitionsItem) ProtoMessage() {} +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } + +type isSecurityDefinitionsItem_Oneof interface { + isSecurityDefinitionsItem_Oneof() +} + +type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { + BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` +} +type SecurityDefinitionsItem_ApiKeySecurity struct { + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` +} + +func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} +func 
(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } +} + +func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_ApiKeySecurity: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + b.EncodeVarint(6<<3 | 
proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SecurityDefinitionsItem) + switch tag { + case 1: // oneof.basic_authentication_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BasicAuthenticationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} + return true, err + case 2: // oneof.api_key_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ApiKeySecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} + return true, err + case 3: // oneof.oauth2_implicit_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ImplicitSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} + return true, err + case 4: // oneof.oauth2_password_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2PasswordSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} + return true, err + case 5: // oneof.oauth2_application_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ApplicationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} + return true, err + case 6: // oneof.oauth2_access_code_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2AccessCodeSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} + return true, err + default: + return false, nil + } +} + +func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + s := proto.Size(x.BasicAuthenticationSecurity) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_ApiKeySecurity: + s := proto.Size(x.ApiKeySecurity) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + s := proto.Size(x.Oauth2ImplicitSecurity) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + s := proto.Size(x.Oauth2PasswordSecurity) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + s := proto.Size(x.Oauth2ApplicationSecurity) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + s := proto.Size(x.Oauth2AccessCodeSecurity) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityRequirement struct { + 
AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } +func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } +func (*SecurityRequirement) ProtoMessage() {} +func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } + +func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type StringArray struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *StringArray) Reset() { *m = StringArray{} } +func (m *StringArray) String() string { return proto.CompactTextString(m) } +func (*StringArray) ProtoMessage() {} +func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } + +func (m *StringArray) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Tag struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } + +func (m *Tag) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Tag) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Tag) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Tag) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type TypeItem struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *TypeItem) Reset() { *m = TypeItem{} } +func (m *TypeItem) String() string { return proto.CompactTextString(m) } +func (*TypeItem) ProtoMessage() {} +func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } + +func (m *TypeItem) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +// Any property starting with x- is valid. 
+type VendorExtension struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *VendorExtension) Reset() { *m = VendorExtension{} } +func (m *VendorExtension) String() string { return proto.CompactTextString(m) } +func (*VendorExtension) ProtoMessage() {} +func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } + +func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Xml struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Xml) Reset() { *m = Xml{} } +func (m *Xml) String() string { return proto.CompactTextString(m) } +func (*Xml) ProtoMessage() {} +func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } + +func (m *Xml) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Xml) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *Xml) GetPrefix() string { + if m != nil { + return m.Prefix + } + return "" +} + +func (m *Xml) GetAttribute() bool { + if m != nil { + return m.Attribute + } + return false +} + +func (m *Xml) GetWrapped() bool { + if m != nil { + return m.Wrapped + } + return false +} + +func (m *Xml) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func init() { + proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") + proto.RegisterType((*Any)(nil), "openapi.v2.Any") + proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") + proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") + proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") + proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") + proto.RegisterType((*Default)(nil), "openapi.v2.Default") + proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") + proto.RegisterType((*Document)(nil), "openapi.v2.Document") + proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") + proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") + proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") + proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") + proto.RegisterType((*Header)(nil), "openapi.v2.Header") + proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") + proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") + proto.RegisterType((*Info)(nil), "openapi.v2.Info") + proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") + proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") + proto.RegisterType((*License)(nil), "openapi.v2.License") + proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") + proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") + proto.RegisterType((*NamedParameter)(nil), 
"openapi.v2.NamedParameter") + proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") + proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") + proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") + proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") + proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") + proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") + proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") + proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") + proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") + proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") + proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") + proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") + proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") + proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") + proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") + proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") + proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") + proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") + proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") + proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") + proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") + proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") + proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") + proto.RegisterType((*Response)(nil), "openapi.v2.Response") + proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") + proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") + proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") + proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") + proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") + proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") + proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") + proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") + proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") + proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") + proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") + proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") + proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") +} + +func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 3129 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, + 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, + 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, + 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, + 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, + 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d, + 
0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7, + 0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7, + 0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f, + 0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c, + 0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c, + 0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc, + 0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01, + 0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2, + 0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5, + 0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23, + 0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51, + 0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2, + 0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5, + 0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c, + 0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89, + 0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f, + 0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d, + 0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c, + 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e, + 0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1, + 0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55, + 0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87, + 0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4, + 0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30, + 0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6, + 0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17, + 0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4, + 0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74, + 0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2, + 0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4, + 0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0, + 0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa, + 0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61, + 0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e, + 0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b, + 0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0, + 0x03, 0x5b, 0x79, 0x25, 
0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7, + 0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5, + 0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e, + 0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04, + 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d, + 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d, + 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13, + 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18, + 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9, + 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73, + 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a, + 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9, + 0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8, + 0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19, + 0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9, + 0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60, + 0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29, + 0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92, + 0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc, + 0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a, + 0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe, + 0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6, + 0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8, + 0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90, + 0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf, + 0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64, + 0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0, + 0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d, + 0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18, + 0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4, + 0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58, + 0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21, + 0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d, + 0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9, + 0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67, + 0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17, + 0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 
0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27, + 0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5, + 0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a, + 0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4, + 0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e, + 0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2, + 0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87, + 0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38, + 0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93, + 0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0, + 0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b, + 0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2, + 0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86, + 0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c, + 0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02, + 0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3, + 0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3, + 0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e, + 0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7, + 0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94, + 0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34, + 0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a, + 0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8, + 0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b, + 0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5, + 0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e, + 0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89, + 0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6, + 0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c, + 0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14, + 0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41, + 0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7, + 0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d, + 0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc, + 0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7, + 0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3, + 0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 
0x9a, 0x35, 0xd1, 0x69, + 0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36, + 0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e, + 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c, + 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab, + 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33, + 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07, + 0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f, + 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f, + 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b, + 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc, + 0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e, + 0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad, + 0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16, + 0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89, + 0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45, + 0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8, + 0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b, + 0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08, + 0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93, + 0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7, + 0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb, + 0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74, + 0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41, + 0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20, + 0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a, + 0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94, + 0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e, + 0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4, + 0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6, + 0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36, + 0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a, + 0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58, + 0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46, + 0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88, + 0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c, + 0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81, + 
0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61, + 0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4, + 0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea, + 0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf, + 0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b, + 0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0, + 0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4, + 0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5, + 0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44, + 0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f, + 0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36, + 0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd, + 0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e, + 0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb, + 0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d, + 0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77, + 0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9, + 0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5, + 0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4, + 0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6, + 0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f, + 0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb, + 0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7, + 0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66, + 0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9, + 0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d, + 0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2, + 0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c, + 0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2, + 0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62, + 0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7, + 0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1, + 0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21, + 0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e, + 0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6, + 0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe, + 0xb1, 0x5b, 0xc6, 0xb3, 
0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c, + 0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae, + 0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e, + 0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1, + 0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8, + 0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47, + 0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf, + 0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, + 0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto new file mode 100644 index 00000000..2f336b3e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto @@ -0,0 +1,662 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v2; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v2"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +message AdditionalPropertiesItem { + oneof oneof { + Schema schema = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message ApiKeySecurity { + string type = 1; + string name = 2; + string in = 3; + string description = 4; + repeated NamedAny vendor_extension = 5; +} + +message BasicAuthenticationSecurity { + string type = 1; + string description = 2; + repeated NamedAny vendor_extension = 3; +} + +message BodyParameter { + // A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed. + string description = 1; + // The name of the parameter. + string name = 2; + // Determines the location of the parameter. + string in = 3; + // Determines whether or not this parameter is required or optional. + bool required = 4; + Schema schema = 5; + repeated NamedAny vendor_extension = 6; +} + +// Contact information for the owners of the API. +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. + string url = 2; + // The email address of the contact person/organization. + string email = 3; + repeated NamedAny vendor_extension = 4; +} + +message Default { + repeated NamedAny additional_properties = 1; +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +message Definitions { + repeated NamedSchema additional_properties = 1; +} + +message Document { + // The Swagger version of this document. + string swagger = 1; + Info info = 2; + // The host (name or ip) of the API. Example: 'swagger.io' + string host = 3; + // The base path to the API. Example: '/api'. + string base_path = 4; + // The transfer protocol of the API. + repeated string schemes = 5; + // A list of MIME types accepted by the API. + repeated string consumes = 6; + // A list of MIME types the API can produce. + repeated string produces = 7; + Paths paths = 8; + Definitions definitions = 9; + ParameterDefinitions parameters = 10; + ResponseDefinitions responses = 11; + repeated SecurityRequirement security = 12; + SecurityDefinitions security_definitions = 13; + repeated Tag tags = 14; + ExternalDocs external_docs = 15; + repeated NamedAny vendor_extension = 16; +} + +message Examples { + repeated NamedAny additional_properties = 1; +} + +// information about external documentation +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// A deterministic version of a JSON Schema object. +message FileSchema { + string format = 1; + string title = 2; + string description = 3; + Any default = 4; + repeated string required = 5; + string type = 6; + bool read_only = 7; + ExternalDocs external_docs = 8; + Any example = 9; + repeated NamedAny vendor_extension = 10; +} + +message FormDataParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Header { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + string description = 18; + repeated NamedAny vendor_extension = 19; +} + +message HeaderParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +message Headers { + repeated NamedHeader additional_properties = 1; +} + +// General information about the API. +message Info { + // A unique and precise title of the API. + string title = 1; + // A semantic version number of the API. + string version = 2; + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + string description = 3; + // The terms of service for the API. + string terms_of_service = 4; + Contact contact = 5; + License license = 6; + repeated NamedAny vendor_extension = 7; +} + +message ItemsItem { + repeated Schema schema = 1; +} + +message JsonReference { + string _ref = 1; + string description = 2; +} + +message License { + // The name of the license type. It's encouraged to use an OSI compatible license. + string name = 1; + // The URL pointing to the license. + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +message NamedHeader { + // Map key + string name = 1; + // Mapped value + Header value = 2; +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +message NamedParameter { + // Map key + string name = 1; + // Mapped value + Parameter value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +message NamedResponse { + // Map key + string name = 1; + // Mapped value + Response value = 2; +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +message NamedResponseValue { + // Map key + string name = 1; + // Mapped value + ResponseValue value = 2; +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +message NamedSchema { + // Map key + string name = 1; + // Mapped value + Schema value = 2; +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. +message NamedSecurityDefinitionsItem { + // Map key + string name = 1; + // Mapped value + SecurityDefinitionsItem value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +message NonBodyParameter { + oneof oneof { + HeaderParameterSubSchema header_parameter_sub_schema = 1; + FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + QueryParameterSubSchema query_parameter_sub_schema = 3; + PathParameterSubSchema path_parameter_sub_schema = 4; + } +} + +message Oauth2AccessCodeSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string token_url = 5; + string description = 6; + repeated NamedAny vendor_extension = 7; +} + +message Oauth2ApplicationSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2ImplicitSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2PasswordSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2Scopes { + repeated NamedString additional_properties = 1; +} + +message Operation { + repeated string tags = 1; + // A brief summary of the operation. + string summary = 2; + // A longer description of the operation, GitHub Flavored Markdown is allowed. + string description = 3; + ExternalDocs external_docs = 4; + // A unique identifier of the operation. + string operation_id = 5; + // A list of MIME types the API can produce. + repeated string produces = 6; + // A list of MIME types the API can consume. + repeated string consumes = 7; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 8; + Responses responses = 9; + // The transfer protocol of the API. 
+ repeated string schemes = 10; + bool deprecated = 11; + repeated SecurityRequirement security = 12; + repeated NamedAny vendor_extension = 13; +} + +message Parameter { + oneof oneof { + BodyParameter body_parameter = 1; + NonBodyParameter non_body_parameter = 2; + } +} + +// One or more JSON representations for parameters +message ParameterDefinitions { + repeated NamedParameter additional_properties = 1; +} + +message ParametersItem { + oneof oneof { + Parameter parameter = 1; + JsonReference json_reference = 2; + } +} + +message PathItem { + string _ref = 1; + Operation get = 2; + Operation put = 3; + Operation post = 4; + Operation delete = 5; + Operation options = 6; + Operation head = 7; + Operation patch = 8; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 9; + repeated NamedAny vendor_extension = 10; +} + +message PathParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +message Paths { + repeated NamedAny vendor_extension = 1; + repeated NamedPathItem path = 2; +} + +message PrimitivesItems { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + repeated NamedAny vendor_extension = 18; +} + +message Properties { + repeated NamedSchema additional_properties = 1; +} + +message QueryParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Response { + string description = 1; + SchemaItem schema = 2; + Headers headers = 3; + Examples examples = 4; + repeated NamedAny vendor_extension = 5; +} + +// One or more JSON representations for parameters +message ResponseDefinitions { + repeated NamedResponse additional_properties = 1; +} + +message ResponseValue { + oneof oneof { + Response response = 1; + JsonReference json_reference = 2; + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. +message Responses { + repeated NamedResponseValue response_code = 1; + repeated NamedAny vendor_extension = 2; +} + +// A deterministic version of a JSON Schema object. +message Schema { + string _ref = 1; + string format = 2; + string title = 3; + string description = 4; + Any default = 5; + double multiple_of = 6; + double maximum = 7; + bool exclusive_maximum = 8; + double minimum = 9; + bool exclusive_minimum = 10; + int64 max_length = 11; + int64 min_length = 12; + string pattern = 13; + int64 max_items = 14; + int64 min_items = 15; + bool unique_items = 16; + int64 max_properties = 17; + int64 min_properties = 18; + repeated string required = 19; + repeated Any enum = 20; + AdditionalPropertiesItem additional_properties = 21; + TypeItem type = 22; + ItemsItem items = 23; + repeated Schema all_of = 24; + Properties properties = 25; + string discriminator = 26; + bool read_only = 27; + Xml xml = 28; + ExternalDocs external_docs = 29; + Any example = 30; + repeated NamedAny vendor_extension = 31; +} + +message SchemaItem { + oneof oneof { + Schema schema = 1; + FileSchema file_schema = 2; + } +} + +message SecurityDefinitions { + repeated NamedSecurityDefinitionsItem additional_properties = 1; +} + +message SecurityDefinitionsItem { + oneof oneof { + BasicAuthenticationSecurity basic_authentication_security = 1; + ApiKeySecurity api_key_security = 2; + Oauth2ImplicitSecurity oauth2_implicit_security = 3; + Oauth2PasswordSecurity oauth2_password_security = 4; + Oauth2ApplicationSecurity oauth2_application_security = 5; + Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + } +} + +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +message StringArray { + repeated string value = 1; +} + +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny vendor_extension = 4; +} + +message TypeItem { + repeated string value = 1; +} + +// Any property starting with x- is valid. 
+message VendorExtension { + repeated NamedAny additional_properties = 1; +} + +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny vendor_extension = 6; +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md new file mode 100644 index 00000000..1131b6f1 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md @@ -0,0 +1,16 @@ +# OpenAPI v2 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model +and related code for supporting OpenAPI v2. + +Gnostic applications and plugins can use OpenAPIv2.proto +to generate Protocol Buffer support code for their preferred languages. + +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI +descriptions into the Protocol Buffer-based datastructures +generated from OpenAPIv2.proto. + +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic +compiler generator, and OpenAPIv2.pb.go is generated by +protoc, the Protocol Buffer compiler, and protoc-gen-go, the +Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/README.md b/vendor/github.com/googleapis/gnostic/README.md new file mode 100644 index 00000000..98aaeaf5 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/README.md @@ -0,0 +1,103 @@ +[![Build Status](https://travis-ci.org/googleapis/gnostic.svg?branch=master)](https://travis-ci.org/googleapis/gnostic) + +# ⨁ gnostic + +This repository contains a Go command line tool which converts +JSON and YAML [OpenAPI](https://github.com/OAI/OpenAPI-Specification) +descriptions to and from equivalent Protocol Buffer representations. + +[Protocol Buffers](https://developers.google.com/protocol-buffers/) +provide a language-neutral, platform-neutral, extensible mechanism +for serializing structured data. +**gnostic**'s Protocol Buffer models for the OpenAPI Specification +can be used to generate code that includes data structures with +explicit fields for the elements of an OpenAPI description. +This makes it possible for developers to work with OpenAPI +descriptions in type-safe ways, which is particularly useful +in strongly-typed languages like Go and Swift. + +**gnostic** reads OpenAPI descriptions into +these generated data structures, reports errors, +resolves internal dependencies, and writes the results +in a binary form that can be used in any language that is +supported by the Protocol Buffer tools. +A plugin interface simplifies integration with API +tools written in a variety of different languages, +and when necessary, Protocol Buffer OpenAPI descriptions +can be reexported as JSON or YAML. + +**gnostic** compilation code and OpenAPI Protocol Buffer +models are automatically generated from an +[OpenAPI JSON Schema](https://github.com/OAI/OpenAPI-Specification/blob/master/schemas/v2.0/schema.json). +Source code for the generator is in the [generate-gnostic](generate-gnostic) directory. + +## Disclaimer + +This is prerelease software and work in progress. Feedback and +contributions are welcome, but we currently make no guarantees of +function or stability. + +## Requirements + +**gnostic** can be run in any environment that supports [Go](http://golang.org) +and the [Google Protocol Buffer Compiler](https://github.com/google/protobuf). + +## Installation + +1. Get this package by downloading it with `go get`. + + go get github.com/googleapis/gnostic + +2. 
[Optional] Build and run the compiler generator. +This uses the OpenAPI JSON schema to generate a Protocol Buffer language file +that describes the OpenAPI specification and a Go-language file of code that +will read a JSON or YAML OpenAPI representation into the generated protocol +buffers. Pre-generated versions of these files are in the OpenAPIv2 directory. + + cd $GOPATH/src/github.com/googleapis/gnostic/generate-gnostic + go install + cd .. + generate-gnostic --v2 + +3. [Optional] Generate Protocol Buffer support code. +A pre-generated version of this file is checked into the OpenAPIv2 directory. +This step requires a local installation of protoc, the Protocol Buffer Compiler. +You can get protoc [here](https://github.com/google/protobuf). + + ./COMPILE-PROTOS.sh + +4. [Optional] Rebuild **gnostic**. This is only necessary if you've performed steps +2 or 3 above. + + go install github.com/googleapis/gnostic + +5. Run **gnostic**. This will create a file in the current directory named "petstore.pb" that contains a binary +Protocol Buffer description of a sample API. + + gnostic --pb-out=. examples/petstore.json + +6. You can also compile files that you specify with a URL. Here's another way to compile the previous +example. This time we're creating "petstore.text", which contains a textual representation of the +Protocol Buffer description. This is mainly for use in testing and debugging. + + gnostic --text-out=petstore.text https://raw.githubusercontent.com/googleapis/gnostic/master/examples/petstore.json + +7. For a sample application, see apps/report. + + go install github.com/googleapis/gnostic/apps/report + report petstore.pb + +8. **gnostic** supports plugins. This builds and runs a sample plugin +that reports some basic information about an API. The "-" causes the plugin to +write its output to stdout. + + go install github.com/googleapis/gnostic/plugins/gnostic-go-sample + gnostic examples/petstore.json --go-sample-out=- + +## Copyright + +Copyright 2017, Google Inc. + +## License + +Released under the Apache 2.0 license. diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md new file mode 100644 index 00000000..848b16c6 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -0,0 +1,3 @@ +# Compiler support code + +This directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go new file mode 100644 index 00000000..a64c1b75 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -0,0 +1,43 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Context contains state of the compiler as it traverses a document. 
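+//
+// A small illustration (hypothetical names): contexts form a chain through
+// Parent, and Description joins the names from the root down, e.g.
+//
+//    root := NewContext("document", nil)
+//    child := NewContext("paths", root)
+//    // child.Description() == "document.paths"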
+type Context struct { + Parent *Context + Name string + ExtensionHandlers *[]ExtensionHandler +} + +// NewContextWithExtensions returns a new object representing the compiler state +func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} +} + +// NewContext returns a new object representing the compiler state +func NewContext(name string, parent *Context) *Context { + if parent != nil { + return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + } + return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} +} + +// Description returns a text description of the compiler state +func (context *Context) Description() string { + if context.Parent != nil { + return context.Parent.Description() + "." + context.Name + } + return context.Name +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go new file mode 100644 index 00000000..d8672c10 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -0,0 +1,61 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Error represents compiler errors and their location in the document. +type Error struct { + Context *Context + Message string +} + +// NewError creates an Error. +func NewError(context *Context, message string) *Error { + return &Error{Context: context, Message: message} +} + +// Error returns the string value of an Error. +func (err *Error) Error() string { + if err.Context == nil { + return "ERROR " + err.Message + } + return "ERROR " + err.Context.Description() + " " + err.Message +} + +// ErrorGroup is a container for groups of Error values. +type ErrorGroup struct { + Errors []error +} + +// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. +func NewErrorGroupOrNil(errors []error) error { + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } else { + return &ErrorGroup{Errors: errors} + } +} + +func (group *ErrorGroup) Error() string { + result := "" + for i, err := range group.Errors { + if i > 0 { + result += "\n" + } + result += err.Error() + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go new file mode 100644 index 00000000..1f85b650 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go @@ -0,0 +1,101 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + + "strings" + + "errors" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + ext_plugin "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v2" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// HandleExtension calls a binary extension handler. +func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { + handled := false + var errFromPlugin error + var outFromPlugin *any.Any + + if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { + for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { + outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) + if outFromPlugin == nil { + continue + } else { + handled = true + break + } + } + } + return handled, outFromPlugin, errFromPlugin +} + +func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + binary, _ := yaml.Marshal(in) + + request := &ext_plugin.ExtensionHandlerRequest{} + + version := &ext_plugin.Version{} + version.Major = 0 + version.Minor = 1 + version.Patch = 0 + request.CompilerVersion = version + + request.Wrapper = &ext_plugin.Wrapper{} + + request.Wrapper.Version = "v2" + request.Wrapper.Yaml = string(binary) + request.Wrapper.ExtensionName = extensionName + + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + + if err != nil { + fmt.Printf("Error: %+v\n", err) + return nil, err + } + response := &ext_plugin.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil { + fmt.Printf("Error: %+v\n", err) + fmt.Printf("%s\n", string(output)) + return nil, err + } + if !response.Handled { + return nil, nil + } + if len(response.Error) != 0 { + message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) + return nil, errors.New(message) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go new file mode 100644 index 00000000..76df635f --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -0,0 +1,197 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "gopkg.in/yaml.v2" + "regexp" + "sort" + "strconv" +) + +// compiler helper functions, usually called from generated code + +// UnpackMap gets a yaml.MapSlice if possible. +func UnpackMap(in interface{}) (yaml.MapSlice, bool) { + m, ok := in.(yaml.MapSlice) + if ok { + return m, true + } + // do we have an empty array? + a, ok := in.([]interface{}) + if ok && len(a) == 0 { + // if so, return an empty map + return yaml.MapSlice{}, true + } + return nil, false +} + +// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. +func SortedKeysForMap(m yaml.MapSlice) []string { + keys := make([]string, 0) + for _, item := range m { + keys = append(keys, item.Key.(string)) + } + sort.Strings(keys) + return keys +} + +// MapHasKey returns true if a yaml.MapSlice contains a specified key. +func MapHasKey(m yaml.MapSlice, key string) bool { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return true + } + } + return false +} + +// MapValueForKey gets the value of a map value for a specified key. +func MapValueForKey(m yaml.MapSlice, key string) interface{} { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return item.Value + } + } + return nil +} + +// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. +func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { + stringArray := make([]string, 0) + for _, item := range interfaceArray { + v, ok := item.(string) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// MissingKeysInMap identifies which keys from a list of required keys are not in a map. +func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { + missingKeys := make([]string, 0) + for _, k := range requiredKeys { + if !MapHasKey(m, k) { + missingKeys = append(missingKeys, k) + } + } + return missingKeys +} + +// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. +func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { + invalidKeys := make([]string, 0) + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok { + key := itemKey + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { + found = true + break + } + } + if !found { + invalidKeys = append(invalidKeys, key) + } + } + } + } + return invalidKeys +} + +// DescribeMap describes a map (for debugging purposes). 
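+//
+// For example (illustrative), DescribeMap(map[string]interface{}{"a": 1}, "")
+// returns "a:\n  1\n".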
+func DescribeMap(in interface{}, indent string) string { + description := "" + m, ok := in.(map[string]interface{}) + if ok { + keys := make([]string, 0) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := m[k] + description += fmt.Sprintf("%s%s:\n", indent, k) + description += DescribeMap(v, indent+" ") + } + return description + } + a, ok := in.([]interface{}) + if ok { + for i, v := range a { + description += fmt.Sprintf("%s%d:\n", indent, i) + description += DescribeMap(v, indent+" ") + } + return description + } + description += fmt.Sprintf("%s%+v\n", indent, in) + return description +} + +// PluralProperties returns the string "properties" pluralized. +func PluralProperties(count int) string { + if count == 1 { + return "property" + } + return "properties" +} + +// StringArrayContainsValue returns true if a string array contains a specified value. +func StringArrayContainsValue(array []string, value string) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +// StringArrayContainsValues returns true if a string array contains all of a list of specified values. +func StringArrayContainsValues(array []string, values []string) bool { + for _, value := range values { + if !StringArrayContainsValue(array, value) { + return false + } + } + return true +} + +// StringValue returns the string value of an item. +func StringValue(item interface{}) (value string, ok bool) { + value, ok = item.(string) + if ok { + return value, ok + } + intValue, ok := item.(int) + if ok { + return strconv.Itoa(intValue), true + } + return "", false +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go new file mode 100644 index 00000000..9713a21c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/main.go @@ -0,0 +1,16 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compiler provides support functions to generated compiler code. +package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go new file mode 100644 index 00000000..604a46a6 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -0,0 +1,167 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package compiler
+
+import (
+ "fmt"
+ "gopkg.in/yaml.v2"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "strings"
+)
+
+var fileCache map[string][]byte
+var infoCache map[string]interface{}
+var count int64
+
+var verboseReader = false
+
+func initializeFileCache() {
+ if fileCache == nil {
+ fileCache = make(map[string][]byte, 0)
+ }
+}
+
+func initializeInfoCache() {
+ if infoCache == nil {
+ infoCache = make(map[string]interface{}, 0)
+ }
+}
+
+// FetchFile gets a specified file from the local filesystem or a remote location.
+func FetchFile(fileurl string) ([]byte, error) {
+ initializeFileCache()
+ bytes, ok := fileCache[fileurl]
+ if ok {
+ if verboseReader {
+ log.Printf("Cache hit %s", fileurl)
+ }
+ return bytes, nil
+ }
+ log.Printf("Fetching %s", fileurl)
+ response, err := http.Get(fileurl)
+ if err != nil {
+ return nil, err
+ }
+ defer response.Body.Close()
+ bytes, err = ioutil.ReadAll(response.Body)
+ if err == nil {
+ fileCache[fileurl] = bytes
+ }
+ return bytes, err
+}
+
+// ReadBytesForFile reads the bytes of a file.
+func ReadBytesForFile(filename string) ([]byte, error) {
+ // is the filename a url?
+ fileurl, _ := url.Parse(filename)
+ if fileurl.Scheme != "" {
+ // yes, fetch it
+ bytes, err := FetchFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return bytes, nil
+ }
+ // no, it's a local filename
+ bytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return bytes, nil
+}
+
+// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice.
+func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
+ initializeInfoCache()
+ cachedInfo, ok := infoCache[filename]
+ if ok {
+ if verboseReader {
+ log.Printf("Cache hit info for file %s", filename)
+ }
+ return cachedInfo, nil
+ }
+ if verboseReader {
+ log.Printf("Reading info for file %s", filename)
+ }
+ var info yaml.MapSlice
+ err := yaml.Unmarshal(bytes, &info)
+ if err != nil {
+ return nil, err
+ }
+ infoCache[filename] = info
+ return info, nil
+}
+
+// ReadInfoForRef reads a file and returns the fragment needed to resolve a $ref.
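+// For example (illustrative), with basefile "api/openapi.yaml" and
+// ref "definitions.yaml#/definitions/Pet", it reads "api/definitions.yaml"
+// and then walks the "definitions" and "Pet" keys of the parsed document.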
+func ReadInfoForRef(basefile string, ref string) (interface{}, error) { + initializeInfoCache() + { + info, ok := infoCache[ref] + if ok { + if verboseReader { + log.Printf("Cache hit for ref %s#%s", basefile, ref) + } + return info, nil + } + } + if verboseReader { + log.Printf("Reading info for ref %s#%s", basefile, ref) + } + count = count + 1 + basedir, _ := filepath.Split(basefile) + parts := strings.Split(ref, "#") + var filename string + if parts[0] != "" { + filename = basedir + parts[0] + } else { + filename = basefile + } + bytes, err := ReadBytesForFile(filename) + if err != nil { + return nil, err + } + info, err := ReadInfoFromBytes(filename, bytes) + if err != nil { + log.Printf("File error: %v\n", err) + } else { + if len(parts) > 1 { + path := strings.Split(parts[1], "/") + for i, key := range path { + if i > 0 { + m, ok := info.(yaml.MapSlice) + if ok { + found := false + for _, section := range m { + if section.Key == key { + info = section.Value + found = true + } + } + if !found { + infoCache[ref] = nil + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + } + } + } + } + } + infoCache[ref] = info + return info, nil +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md new file mode 100644 index 00000000..ff1c2eb1 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/README.md @@ -0,0 +1,5 @@ +# Extensions + +This directory contains support code for building Gnostic extensions and associated examples. + +Extensions are used to compile vendor or specification extensions into protocol buffer structures. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go new file mode 100644 index 00000000..b14f1f94 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -0,0 +1,219 @@ +// Code generated by protoc-gen-go. +// source: extension.proto +// DO NOT EDIT! + +/* +Package openapiextension_v1 is a generated protocol buffer package. + +It is generated from these files: + extension.proto + +It has these top-level messages: + Version + ExtensionHandlerRequest + ExtensionHandlerResponse + Wrapper +*/ +package openapiextension_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of OpenAPI compiler. +type Version struct { + Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. 
+ Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Version) GetMajor() int32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil { + return m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil { + return m.Suffix + } + return "" +} + +// An encoded Request is written to the ExtensionHandler's stdin. +type ExtensionHandlerRequest struct { + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to openapic. + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` + // The version number of openapi compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } +func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerRequest) ProtoMessage() {} +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if m != nil { + return m.Wrapper + } + return nil +} + +func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +type ExtensionHandlerResponse struct { + // true if the extension is handled by the extension handler; false otherwise + Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` + // text output + Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` +} + +func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } +func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerResponse) ProtoMessage() {} +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ExtensionHandlerResponse) GetHandled() bool { + if m != nil { + return m.Handled + } + return false +} + +func (m *ExtensionHandlerResponse) GetError() []string { + if m != nil { + return m.Error + } + return nil +} + +func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +type Wrapper struct { + // version of the OpenAPI specification in which this extension was written. 
+ Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // Name of the extension + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` + // Must be a valid yaml for the proto + Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Wrapper) Reset() { *m = Wrapper{} } +func (m *Wrapper) String() string { return proto.CompactTextString(m) } +func (*Wrapper) ProtoMessage() {} +func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Wrapper) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Wrapper) GetExtensionName() string { + if m != nil { + return m.ExtensionName + } + return "" +} + +func (m *Wrapper) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") + proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") + proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") + proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") +} + +func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 355 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, + 0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, + 0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, + 0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, + 0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4, + 0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4, + 0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0, + 0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34, + 0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a, + 0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a, + 0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66, + 0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2, + 0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d, + 0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3, + 0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c, + 0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa, + 0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab, + 0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3, + 0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62, + 0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23, + 0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1, + 0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52, + 
0x02, 0x00, 0x00,
+}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto
new file mode 100644
index 00000000..806760a1
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto
@@ -0,0 +1,93 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+import "google/protobuf/any.proto";
+package openapiextension.v1;
+
+// This option lets the proto compiler generate Java code inside the package
+// name (see below) instead of inside an outer class. It creates a simpler
+// developer experience by reducing one level of name nesting and being
+// consistent with most programming languages that don't support outer classes.
+option java_multiple_files = true;
+
+// The Java outer classname should be the filename in UpperCamelCase. This
+// class is only used to hold the proto descriptor, so developers don't need to
+// work with it directly.
+option java_outer_classname = "OpenAPIExtensionV1";
+
+// The Java package name must be the proto package name with a proper prefix.
+option java_package = "org.openapic.v1";
+
+// A reasonable prefix for the Objective-C symbols generated from the package.
+// It should at a minimum be 3 characters long, all uppercase, and convention
+// is to use an abbreviation of the package name. Something short, but
+// hopefully unique enough to not conflict with things that may come along in
+// the future. 'GPB' is reserved for the protocol buffer implementation itself.
+//
+option objc_class_prefix = "OAE"; // "OpenAPI Extension"
+
+// The version number of OpenAPI compiler.
+message Version {
+ int32 major = 1;
+ int32 minor = 2;
+ int32 patch = 3;
+ // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+ // be empty for mainline stable releases.
+ string suffix = 4;
+}
+
+// An encoded Request is written to the ExtensionHandler's stdin.
+message ExtensionHandlerRequest {
+
+ // The OpenAPI descriptions that were explicitly listed on the command line.
+ // The specifications will appear in the order they are specified to openapic.
+ Wrapper wrapper = 1;
+
+ // The version number of openapi compiler.
+ Version compiler_version = 3;
+}
+
+// The extension writes an encoded ExtensionHandlerResponse to stdout.
+message ExtensionHandlerResponse {
+
+ // true if the extension is handled by the extension handler; false otherwise
+ bool handled = 1;
+
+ // Error message. If non-empty, the extension handling failed.
+ // The extension handler process should exit with status code zero
+ // even if it reports an error in this way.
+ //
+ // This should be used to indicate errors which prevent the extension from
+ // operating as intended. Errors which indicate a problem in gnostic
+ // itself -- such as the input Document being unparseable -- should be
+ // reported by writing a message to stderr and exiting with a non-zero
+ // status code.
+ repeated string error = 2;
+
+ // text output
+ google.protobuf.Any value = 3;
+}
+
+message Wrapper {
+ // version of the OpenAPI specification in which this extension was written.
+ string version = 1;
+
+ // Name of the extension
+ string extension_name = 2;
+
+ // Must be a valid yaml for the proto
+ string yaml = 3;
+}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go
new file mode 100644
index 00000000..94a8e62a
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go
@@ -0,0 +1,82 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openapiextension_v1
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+)
+
+type documentHandler func(version string, extensionName string, document string)
+type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
+
+func forInputYamlFromOpenapic(handler documentHandler) {
+ data, err := ioutil.ReadAll(os.Stdin)
+ if err != nil {
+ fmt.Println("File error:", err.Error())
+ os.Exit(1)
+ }
+ if len(data) == 0 {
+ fmt.Println("No input data.")
+ os.Exit(1)
+ }
+ request := &ExtensionHandlerRequest{}
+ err = proto.Unmarshal(data, request)
+ if err != nil {
+ fmt.Println("Input error:", err.Error())
+ os.Exit(1)
+ }
+ handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml)
+}
+
+// ProcessExtension calls the handler for a specified extension.
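+//
+// A typical extension binary just delegates to ProcessExtension from main
+// (a sketch; the handler name is hypothetical and must match the
+// extensionHandler signature declared above):
+//
+//    func main() {
+//        ProcessExtension(handleMyExtension)
+//    }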
+func ProcessExtension(handleExtension extensionHandler) { + response := &ExtensionHandlerResponse{} + forInputYamlFromOpenapic( + func(version string, extensionName string, yamlInput string) { + var newObject proto.Message + var err error + + handled, newObject, err := handleExtension(extensionName, yamlInput) + if !handled { + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + + // If we reach here, then the extension is handled + response.Handled = true + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + response.Value, err = ptypes.MarshalAny(newObject) + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + }) + + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) +} diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt new file mode 100644 index 00000000..81316beb --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md new file mode 100644 index 00000000..61bd830e --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/README.md @@ -0,0 +1,24 @@ +httpcache +========= + +[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) + +Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses. + +It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). + +Cache Backends +-------------- + +- The built-in 'memory' cache stores responses in an in-memory map. +- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. 
+- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. +- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. +- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). +- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. +- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. + +License +------- + +- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go new file mode 100644 index 00000000..42e3129d --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go @@ -0,0 +1,61 @@ +// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package +// to supplement an in-memory map with persistent storage +// +package diskcache + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "github.com/peterbourgon/diskv" + "io" +) + +// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage +type Cache struct { + d *diskv.Diskv +} + +// Get returns the response corresponding to key if present +func (c *Cache) Get(key string) (resp []byte, ok bool) { + key = keyToFilename(key) + resp, err := c.d.Read(key) + if err != nil { + return []byte{}, false + } + return resp, true +} + +// Set saves a response to the cache as key +func (c *Cache) Set(key string, resp []byte) { + key = keyToFilename(key) + c.d.WriteStream(key, bytes.NewReader(resp), true) +} + +// Delete removes the response with key from the cache +func (c *Cache) Delete(key string) { + key = keyToFilename(key) + c.d.Erase(key) +} + +func keyToFilename(key string) string { + h := md5.New() + io.WriteString(h, key) + return hex.EncodeToString(h.Sum(nil)) +} + +// New returns a new Cache that will store files in basePath +func New(basePath string) *Cache { + return &Cache{ + d: diskv.New(diskv.Options{ + BasePath: basePath, + CacheSizeMax: 100 * 1024 * 1024, // 100MB + }), + } +} + +// NewWithDiskv returns a new Cache using the provided Diskv as underlying +// storage. +func NewWithDiskv(d *diskv.Diskv) *Cache { + return &Cache{d} +} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go new file mode 100644 index 00000000..8239edc2 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -0,0 +1,553 @@ +// Package httpcache provides a http.RoundTripper implementation that works as a +// mostly RFC-compliant cache for http responses. +// +// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client +// and not for a shared proxy). 
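+//
+// A minimal use (a sketch, assuming the in-memory cache provided below):
+// install a caching transport in an http.Client and issue requests as usual.
+//
+//    client := &http.Client{Transport: httpcache.NewMemoryCacheTransport()}
+//    resp, err := client.Get("http://example.com/")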
+//
+package httpcache
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httputil"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ stale = iota
+ fresh
+ transparent
+ // XFromCache is the header added to responses that are returned from the cache
+ XFromCache = "X-From-Cache"
+)
+
+// A Cache interface is used by the Transport to store and retrieve responses.
+type Cache interface {
+ // Get returns the []byte representation of a cached response and a bool
+ // set to true if the value isn't empty
+ Get(key string) (responseBytes []byte, ok bool)
+ // Set stores the []byte representation of a response against a key
+ Set(key string, responseBytes []byte)
+ // Delete removes the value associated with the key
+ Delete(key string)
+}
+
+// cacheKey returns the cache key for req.
+func cacheKey(req *http.Request) string {
+ return req.URL.String()
+}
+
+// CachedResponse returns the cached http.Response for req if present, and nil
+// otherwise.
+func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
+ cachedVal, ok := c.Get(cacheKey(req))
+ if !ok {
+ return
+ }
+
+ b := bytes.NewBuffer(cachedVal)
+ return http.ReadResponse(bufio.NewReader(b), req)
+}
+
+// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
+type MemoryCache struct {
+ mu sync.RWMutex
+ items map[string][]byte
+}
+
+// Get returns the []byte representation of the response and true if present, false if not
+func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
+ c.mu.RLock()
+ resp, ok = c.items[key]
+ c.mu.RUnlock()
+ return resp, ok
+}
+
+// Set saves response resp to the cache with key
+func (c *MemoryCache) Set(key string, resp []byte) {
+ c.mu.Lock()
+ c.items[key] = resp
+ c.mu.Unlock()
+}
+
+// Delete removes key from the cache
+func (c *MemoryCache) Delete(key string) {
+ c.mu.Lock()
+ delete(c.items, key)
+ c.mu.Unlock()
+}
+
+// NewMemoryCache returns a new Cache that will store items in an in-memory map
+func NewMemoryCache() *MemoryCache {
+ c := &MemoryCache{items: map[string][]byte{}}
+ return c
+}
+
+// Transport is an implementation of http.RoundTripper that will return values from a cache
+// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
+// to repeated requests allowing servers to return 304 / Not Modified
+type Transport struct {
+ // The RoundTripper interface actually used to make requests
+ // If nil, http.DefaultTransport is used
+ Transport http.RoundTripper
+ Cache Cache
+ // If true, responses returned from the cache will be given an extra header, X-From-Cache
+ MarkCachedResponses bool
+}
+
+// NewTransport returns a new Transport with the
+// provided Cache implementation and MarkCachedResponses set to true
+func NewTransport(c Cache) *Transport {
+ return &Transport{Cache: c, MarkCachedResponses: true}
+}
+
+// Client returns an *http.Client that caches responses.
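+//
+// For example (a sketch):
+//
+//    t := NewTransport(NewMemoryCache())
+//    client := t.Client()
+//    resp, err := client.Get("http://example.com/")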
+func (t *Transport) Client() *http.Client { + return &http.Client{Transport: t} +} + +// varyMatches will return false unless all of the cached values for the headers listed in Vary +// match the new request +func varyMatches(cachedResp *http.Response, req *http.Request) bool { + for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { + header = http.CanonicalHeaderKey(header) + if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { + return false + } + } + return true +} + +// RoundTrip takes a Request and returns a Response +// +// If there is a fresh Response already in cache, then it will be returned without connecting to +// the server. +// +// If there is a stale Response, then any validators it contains will be set on the new request +// to give the server a chance to respond with NotModified. If this happens, then the cached Response +// will be returned. +func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + cacheKey := cacheKey(req) + cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" + var cachedResp *http.Response + if cacheable { + cachedResp, err = CachedResponse(t.Cache, req) + } else { + // Need to invalidate an existing value + t.Cache.Delete(cacheKey) + } + + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + + if cacheable && cachedResp != nil && err == nil { + if t.MarkCachedResponses { + cachedResp.Header.Set(XFromCache, "1") + } + + if varyMatches(cachedResp, req) { + // Can only use cached value if the new request doesn't Vary significantly + freshness := getFreshness(cachedResp.Header, req.Header) + if freshness == fresh { + return cachedResp, nil + } + + if freshness == stale { + var req2 *http.Request + // Add validators if caller hasn't already done so + etag := cachedResp.Header.Get("etag") + if etag != "" && req.Header.Get("etag") == "" { + req2 = cloneRequest(req) + req2.Header.Set("if-none-match", etag) + } + lastModified := cachedResp.Header.Get("last-modified") + if lastModified != "" && req.Header.Get("last-modified") == "" { + if req2 == nil { + req2 = cloneRequest(req) + } + req2.Header.Set("if-modified-since", lastModified) + } + if req2 != nil { + req = req2 + } + } + } + + resp, err = transport.RoundTrip(req) + if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { + // Replace the 304 response with the one from cache, but update with some new headers + endToEndHeaders := getEndToEndHeaders(resp.Header) + for _, header := range endToEndHeaders { + cachedResp.Header[header] = resp.Header[header] + } + cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) + cachedResp.StatusCode = http.StatusOK + + resp = cachedResp + } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && + req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { + // In case of transport failure and stale-if-error activated, returns cached content + // when available + cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) + cachedResp.StatusCode = http.StatusOK + return cachedResp, nil + } else { + if err != nil || resp.StatusCode != http.StatusOK { + t.Cache.Delete(cacheKey) + } + if err != nil { + return nil, err + } + } + } else { + reqCacheControl := parseCacheControl(req.Header) + if _, ok := reqCacheControl["only-if-cached"]; ok { + resp = newGatewayTimeoutResponse(req) + } else { + 
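+ // The request could not be answered from the cache and the client
+ // did not send only-if-cached, so make a real request to the origin.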
resp, err = transport.RoundTrip(req)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
+ for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
+ varyKey = http.CanonicalHeaderKey(varyKey)
+ fakeHeader := "X-Varied-" + varyKey
+ reqValue := req.Header.Get(varyKey)
+ if reqValue != "" {
+ resp.Header.Set(fakeHeader, reqValue)
+ }
+ }
+ switch req.Method {
+ case "GET":
+ // Delay caching until EOF is reached.
+ resp.Body = &cachingReadCloser{
+ R: resp.Body,
+ OnEOF: func(r io.Reader) {
+ resp := *resp
+ resp.Body = ioutil.NopCloser(r)
+ respBytes, err := httputil.DumpResponse(&resp, true)
+ if err == nil {
+ t.Cache.Set(cacheKey, respBytes)
+ }
+ },
+ }
+ default:
+ respBytes, err := httputil.DumpResponse(resp, true)
+ if err == nil {
+ t.Cache.Set(cacheKey, respBytes)
+ }
+ }
+ } else {
+ t.Cache.Delete(cacheKey)
+ }
+ return resp, nil
+}
+
+// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
+var ErrNoDateHeader = errors.New("no Date header")
+
+// Date parses and returns the value of the Date header.
+func Date(respHeaders http.Header) (date time.Time, err error) {
+ dateHeader := respHeaders.Get("date")
+ if dateHeader == "" {
+ err = ErrNoDateHeader
+ return
+ }
+
+ return time.Parse(time.RFC1123, dateHeader)
+}
+
+type realClock struct{}
+
+func (c *realClock) since(d time.Time) time.Duration {
+ return time.Since(d)
+}
+
+type timer interface {
+ since(d time.Time) time.Duration
+}
+
+var clock timer = &realClock{}
+
+// getFreshness will return one of fresh/stale/transparent based on the cache-control
+// values of the request and the response
+//
+// fresh indicates the response can be returned
+// stale indicates that the response needs validating before it is returned
+// transparent indicates the response should not be used to fulfil the request
+//
+// Because this is only a private cache, 'public' and 'private' in cache-control aren't
+// significant. Similarly, s-maxage isn't used.
+func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
+ respCacheControl := parseCacheControl(respHeaders)
+ reqCacheControl := parseCacheControl(reqHeaders)
+ if _, ok := reqCacheControl["no-cache"]; ok {
+ return transparent
+ }
+ if _, ok := respCacheControl["no-cache"]; ok {
+ return stale
+ }
+ if _, ok := reqCacheControl["only-if-cached"]; ok {
+ return fresh
+ }
+
+ date, err := Date(respHeaders)
+ if err != nil {
+ return stale
+ }
+ currentAge := clock.since(date)
+
+ var lifetime time.Duration
+ var zeroDuration time.Duration
+
+ // If a response includes both an Expires header and a max-age directive,
+ // the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
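+ // Cache-Control max-age is an integer number of seconds; appending
+ // "s" lets time.ParseDuration turn it into a time.Duration.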
+ if maxAge, ok := respCacheControl["max-age"]; ok { + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } else { + expiresHeader := respHeaders.Get("Expires") + if expiresHeader != "" { + expires, err := time.Parse(time.RFC1123, expiresHeader) + if err != nil { + lifetime = zeroDuration + } else { + lifetime = expires.Sub(date) + } + } + } + + if maxAge, ok := reqCacheControl["max-age"]; ok { + // the client is willing to accept a response whose age is no greater than the specified time in seconds + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } + if minfresh, ok := reqCacheControl["min-fresh"]; ok { + // the client wants a response that will still be fresh for at least the specified number of seconds. + minfreshDuration, err := time.ParseDuration(minfresh + "s") + if err == nil { + currentAge = time.Duration(currentAge + minfreshDuration) + } + } + + if maxstale, ok := reqCacheControl["max-stale"]; ok { + // Indicates that the client is willing to accept a response that has exceeded its expiration time. + // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded + // its expiration time by no more than the specified number of seconds. + // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. + // + // Responses served only because of a max-stale value are supposed to have a Warning header added to them, + // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different + // return-value available here. + if maxstale == "" { + return fresh + } + maxstaleDuration, err := time.ParseDuration(maxstale + "s") + if err == nil { + currentAge = time.Duration(currentAge - maxstaleDuration) + } + } + + if lifetime > currentAge { + return fresh + } + + return stale +} + +// Returns true if either the request or the response includes the stale-if-error +// cache control extension: https://tools.ietf.org/html/rfc5861 +func canStaleOnError(respHeaders, reqHeaders http.Header) bool { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + + var err error + lifetime := time.Duration(-1) + + if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + + if lifetime >= 0 { + date, err := Date(respHeaders) + if err != nil { + return false + } + currentAge := clock.since(date) + if lifetime > currentAge { + return true + } + } + + return false +} + +func getEndToEndHeaders(respHeaders http.Header) []string { + // These headers are always hop-by-hop + hopByHopHeaders := map[string]struct{}{ + "Connection": struct{}{}, + "Keep-Alive": struct{}{}, + "Proxy-Authenticate": struct{}{}, + "Proxy-Authorization": struct{}{}, + "Te": struct{}{}, + "Trailers": struct{}{}, + "Transfer-Encoding": struct{}{}, + "Upgrade": struct{}{}, + } + + for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { + // any header listed in connection, if present, is also considered hop-by-hop + if strings.Trim(extra, " ") != "" { + hopByHopHeaders[http.CanonicalHeaderKey(extra)] 
= struct{}{} + } + } + endToEndHeaders := []string{} + for respHeader, _ := range respHeaders { + if _, ok := hopByHopHeaders[respHeader]; !ok { + endToEndHeaders = append(endToEndHeaders, respHeader) + } + } + return endToEndHeaders +} + +func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { + if _, ok := respCacheControl["no-store"]; ok { + return false + } + if _, ok := reqCacheControl["no-store"]; ok { + return false + } + return true +} + +func newGatewayTimeoutResponse(req *http.Request) *http.Response { + var braw bytes.Buffer + braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") + resp, err := http.ReadResponse(bufio.NewReader(&braw), req) + if err != nil { + panic(err) + } + return resp +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// headerAllCommaSepValues returns all comma-separated values (each +// with whitespace trimmed) for header name in headers. According to +// Section 4.2 of the HTTP/1.1 spec +// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), +// values from multiple occurrences of a header should be concatenated, if +// the header's value is a comma-separated list. +func headerAllCommaSepValues(headers http.Header, name string) []string { + var vals []string + for _, val := range headers[http.CanonicalHeaderKey(name)] { + fields := strings.Split(val, ",") + for i, f := range fields { + fields[i] = strings.TrimSpace(f) + } + vals = append(vals, fields...) + } + return vals +} + +// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF +// handler with a full copy of the content read from R when EOF is +// reached. +type cachingReadCloser struct { + // Underlying ReadCloser. + R io.ReadCloser + // OnEOF is called with a copy of the content of R when EOF is reached. + OnEOF func(io.Reader) + + buf bytes.Buffer // buf stores a copy of the content of R. +} + +// Read reads the next len(p) bytes from R or until R is drained. The +// return value n is the number of bytes read. If R has no data to +// return, err is io.EOF and OnEOF is called with a full copy of what +// has been read so far. 
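+//
+// A hypothetical usage sketch (illustrative only; resp is assumed to be an
+// *http.Response obtained elsewhere):
+//
+//	resp.Body = &cachingReadCloser{
+//		R: resp.Body,
+//		OnEOF: func(full io.Reader) {
+//			// persist the complete body copy into the cache here
+//		},
+//	}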
+func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
+	n, err = r.R.Read(p)
+	r.buf.Write(p[:n])
+	if err == io.EOF {
+		r.OnEOF(bytes.NewReader(r.buf.Bytes()))
+	}
+	return n, err
+}
+
+func (r *cachingReadCloser) Close() error {
+	return r.R.Close()
+}
+
+// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
+func NewMemoryCacheTransport() *Transport {
+	c := NewMemoryCache()
+	t := NewTransport(c)
+	return t
+}
diff --git a/vendor/github.com/ulule/deepcopier/LICENSE b/vendor/github.com/json-iterator/go/LICENSE
similarity index 95%
rename from vendor/github.com/ulule/deepcopier/LICENSE
rename to vendor/github.com/json-iterator/go/LICENSE
index d5c4ea02..2cf4f5ab 100644
--- a/vendor/github.com/ulule/deepcopier/LICENSE
+++ b/vendor/github.com/json-iterator/go/LICENSE
@@ -1,6 +1,6 @@
-The MIT License (MIT)
+MIT License
 
-Copyright (c) 2015 Ulule
+Copyright (c) 2016 json-iterator
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
-
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 00000000..eca7ab97
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,80 @@
+[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
+[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
+[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
+[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
+[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
+[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
+
+A high-performance, 100% compatible drop-in replacement for "encoding/json"
+
+```
+Go developers, please join us: DiDi Chuxing, Platform Technology Department, taowen@didichuxing.com
+```
+
+# Benchmark
+
+![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
+
+Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
+
+Raw results (easyjson requires static code generation):
+
+| | ns/op | allocated bytes/op | allocations/op |
+| --- | --- | --- | --- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+
+# Usage
+
+100% compatible with the standard library.
+
+Replace
+
+```go
+import "encoding/json"
+json.Marshal(&data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+jsoniter.Marshal(&data)
+```
+
+Replace
+
+```go
+import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import "github.com/json-iterator/go" +jsoniter.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contribution Welcomed ! + +Contributors + +* [thockin](https://github.com/thockin) +* [mattn](https://github.com/mattn) +* [cch123](https://github.com/cch123) + +Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/feature_adapter.go b/vendor/github.com/json-iterator/go/feature_adapter.go new file mode 100644 index 00000000..edb477c4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_adapter.go @@ -0,0 +1,127 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +func lastNotSpacePos(data []byte) int { + for i := len(data) - 1; i >= 0; i-- { + if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' { + return i + 1 + } + } + return 0 +} + +// UnmarshalFromString convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? 
+func (adapter *Decoder) More() bool {
+	return adapter.iter.head != adapter.iter.tail
+}
+
+// Buffered returns a reader over the data remaining in the decoder's buffer
+func (adapter *Decoder) Buffered() io.Reader {
+	remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
+	return bytes.NewReader(remaining)
+}
+
+// UseNumber causes number JSON elements to be decoded as json.Number (an alias of string) instead of float64
+func (adapter *Decoder) UseNumber() {
+	origCfg := adapter.iter.cfg.configBeforeFrozen
+	origCfg.UseNumber = true
+	adapter.iter.cfg = origCfg.Froze().(*frozenConfig)
+}
+
+// NewEncoder same as json.NewEncoder
+func NewEncoder(writer io.Writer) *Encoder {
+	return ConfigDefault.NewEncoder(writer)
+}
+
+// Encoder same as json.Encoder
+type Encoder struct {
+	stream *Stream
+}
+
+// Encode encodes val as JSON and writes it to the underlying io.Writer
+func (adapter *Encoder) Encode(val interface{}) error {
+	adapter.stream.WriteVal(val)
+	adapter.stream.Flush()
+	return adapter.stream.Error
+}
+
+// SetIndent sets the indentation step. Prefix is not supported
+func (adapter *Encoder) SetIndent(prefix, indent string) {
+	adapter.stream.cfg.indentionStep = len(indent)
+}
+
+// SetEscapeHTML controls HTML escaping (enabled by default); set to false to disable it
+func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
+	config := adapter.stream.cfg.configBeforeFrozen
+	config.EscapeHTML = escapeHTML
+	adapter.stream.cfg = config.Froze().(*frozenConfig)
+}
diff --git a/vendor/github.com/json-iterator/go/feature_any.go b/vendor/github.com/json-iterator/go/feature_any.go
new file mode 100644
index 00000000..6733dce4
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_any.go
@@ -0,0 +1,242 @@
+package jsoniter
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+)
+
+// Any is a generic JSON value representation.
+// The lazy implementations hold raw []byte and parse it lazily.
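+//
+// A hedged usage sketch (path elements may be object keys or array indexes):
+//
+//	v := jsoniter.Get([]byte(`{"items":[{"name":"crio"}]}`), "items", 0, "name")
+//	_ = v.ToString() // "crio"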
+type Any interface {
+	LastError() error
+	ValueType() ValueType
+	MustBeValid() Any
+	ToBool() bool
+	ToInt() int
+	ToInt32() int32
+	ToInt64() int64
+	ToUint() uint
+	ToUint32() uint32
+	ToUint64() uint64
+	ToFloat32() float32
+	ToFloat64() float64
+	ToString() string
+	ToVal(val interface{})
+	Get(path ...interface{}) Any
+	// TODO: add Set
+	Size() int
+	Keys() []string
+	GetInterface() interface{}
+	WriteTo(stream *Stream)
+}
+
+type baseAny struct{}
+
+func (any *baseAny) Get(path ...interface{}) Any {
+	return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)}
+}
+
+func (any *baseAny) Size() int {
+	return 0
+}
+
+func (any *baseAny) Keys() []string {
+	return []string{}
+}
+
+func (any *baseAny) ToVal(obj interface{}) {
+	panic("not implemented")
+}
+
+// WrapInt32 turns an int32 into an Any
+func WrapInt32(val int32) Any {
+	return &int32Any{baseAny{}, val}
+}
+
+// WrapInt64 turns an int64 into an Any
+func WrapInt64(val int64) Any {
+	return &int64Any{baseAny{}, val}
+}
+
+// WrapUint32 turns a uint32 into an Any
+func WrapUint32(val uint32) Any {
+	return &uint32Any{baseAny{}, val}
+}
+
+// WrapUint64 turns a uint64 into an Any
+func WrapUint64(val uint64) Any {
+	return &uint64Any{baseAny{}, val}
+}
+
+// WrapFloat64 turns a float64 into an Any
+func WrapFloat64(val float64) Any {
+	return &floatAny{baseAny{}, val}
+}
+
+// WrapString turns a string into an Any
+func WrapString(val string) Any {
+	return &stringAny{baseAny{}, val}
+}
+
+// Wrap turns a Go object into an Any
+func Wrap(val interface{}) Any {
+	if val == nil {
+		return &nilAny{}
+	}
+	asAny, isAny := val.(Any)
+	if isAny {
+		return asAny
+	}
+	typ := reflect.TypeOf(val)
+	switch typ.Kind() {
+	case reflect.Slice:
+		return wrapArray(val)
+	case reflect.Struct:
+		return wrapStruct(val)
+	case reflect.Map:
+		return wrapMap(val)
+	case reflect.String:
+		return WrapString(val.(string))
+	case reflect.Int:
+		return WrapInt64(int64(val.(int)))
+	case reflect.Int8:
+		return WrapInt32(int32(val.(int8)))
+	case reflect.Int16:
+		return WrapInt32(int32(val.(int16)))
+	case reflect.Int32:
+		return WrapInt32(val.(int32))
+	case reflect.Int64:
+		return WrapInt64(val.(int64))
+	case reflect.Uint:
+		return WrapUint64(uint64(val.(uint)))
+	case reflect.Uint8:
+		return WrapUint32(uint32(val.(uint8)))
+	case reflect.Uint16:
+		return WrapUint32(uint32(val.(uint16)))
+	case reflect.Uint32:
+		return WrapUint32(uint32(val.(uint32)))
+	case reflect.Uint64:
+		return WrapUint64(val.(uint64))
+	case reflect.Float32:
+		return WrapFloat64(float64(val.(float32)))
+	case reflect.Float64:
+		return WrapFloat64(val.(float64))
+	case reflect.Bool:
+		if val.(bool) {
+			return &trueAny{}
+		}
+		return &falseAny{}
+	}
+	return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
+}
+
+// ReadAny reads the next JSON element as an Any object. It is a more capable json.RawMessage.
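+//
+// Sketch (hypothetical): inspect a value lazily without fully decoding it:
+//
+//	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"k": [1, 2]}`)
+//	v := iter.ReadAny()
+//	_ = v.Get("k", 1).ToInt() // 2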
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_array.go b/vendor/github.com/json-iterator/go/feature_any_array.go new file mode 100644 index 00000000..0449e9aa --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) 
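+				// keep only elements whose remaining path resolved to a valid value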
+ if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
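+				// collect only mapped elements that resolved to a valid value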
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_bool.go b/vendor/github.com/json-iterator/go/feature_any_bool.go new file mode 100644 index 00000000..9452324a --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/feature_any_float.go b/vendor/github.com/json-iterator/go/feature_any_float.go new file mode 100644 index 00000000..35fdb094 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any 
*floatAny) ToInt64() int64 { + return int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_int32.go b/vendor/github.com/json-iterator/go/feature_any_int32.go new file mode 100644 index 00000000..1b56f399 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_int64.go b/vendor/github.com/json-iterator/go/feature_any_int64.go new file mode 100644 index 00000000..c440d72b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) 
{ + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_invalid.go b/vendor/github.com/json-iterator/go/feature_any_invalid.go new file mode 100644 index 00000000..1d859eac --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/feature_any_nil.go b/vendor/github.com/json-iterator/go/feature_any_nil.go new file mode 100644 index 00000000..d04cb54c --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/feature_any_number.go b/vendor/github.com/json-iterator/go/feature_any_number.go new file mode 100644 index 00000000..4e1c2764 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_number.go @@ -0,0 +1,104 @@ +package jsoniter + +import "unsafe" + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) 
ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_object.go b/vendor/github.com/json-iterator/go/feature_any_object.go new file mode 100644 index 00000000..c44ef5c9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + 
iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) 
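+					// keep resolved values, keyed by the exported field's name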
+ if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
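+				// skip map entries whose remaining path did not resolve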
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_string.go b/vendor/github.com/json-iterator/go/feature_any_string.go new file mode 100644 index 00000000..abf060bd --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_string.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + endPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + endPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint32.go b/vendor/github.com/json-iterator/go/feature_any_uint32.go new file mode 100644 index 00000000..656bbd33 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint64.go b/vendor/github.com/json-iterator/go/feature_any_uint64.go new file mode 100644 index 00000000..7df2fce3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} 
{ + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_config.go b/vendor/github.com/json-iterator/go/feature_config.go new file mode 100644 index 00000000..fc055d50 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_config.go @@ -0,0 +1,312 @@ +package jsoniter + +import ( + "encoding/json" + "errors" + "io" + "reflect" + "sync/atomic" + "unsafe" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + TagKey string +} + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + decoderCache unsafe.Pointer + encoderCache unsafe.Pointer + extensions []Extension + streamPool chan *Stream + iteratorPool chan *Iterator +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, +}.Froze() + +// Froze forge API from config +func (cfg Config) Froze() API { + // TODO: cache frozen config + frozenConfig := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + streamPool: make(chan *Stream, 16), + iteratorPool: make(chan *Iterator, 16), + } + atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{})) + atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[string]ValEncoder{})) + if cfg.MarshalFloatWith6Digits { + frozenConfig.marshalFloatWith6Digits() + } + if cfg.EscapeHTML { + frozenConfig.escapeHTML() + } + if cfg.UseNumber { + frozenConfig.useNumber() + } + frozenConfig.configBeforeFrozen = cfg + return frozenConfig +} + +func (cfg *frozenConfig) useNumber() { + cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }}) +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) registerExtension(extension Extension) { + cfg.extensions = append(cfg.extensions, extension) +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { 
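+	// treat a zero float32 as empty (assumption: IsEmpty backs omitempty-style checks)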
+ return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits() { + // for better performance + cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{}) + cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{}) +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML() { + cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{}) +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) { + done := false + for !done { + ptr := atomic.LoadPointer(&cfg.decoderCache) + cache := *(*map[reflect.Type]ValDecoder)(ptr) + copied := map[reflect.Type]ValDecoder{} + for k, v := range cache { + copied[k] = v + } + copied[cacheKey] = decoder + done = atomic.CompareAndSwapPointer(&cfg.decoderCache, ptr, unsafe.Pointer(&copied)) + } +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) { + done := false + for !done { + ptr := atomic.LoadPointer(&cfg.encoderCache) + cache := *(*map[reflect.Type]ValEncoder)(ptr) + copied := map[reflect.Type]ValEncoder{} + for k, v := range cache { + copied[k] = v + } + copied[cacheKey] = encoder + done = atomic.CompareAndSwapPointer(&cfg.encoderCache, ptr, unsafe.Pointer(&copied)) + } +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder { + ptr := atomic.LoadPointer(&cfg.decoderCache) + cache := *(*map[reflect.Type]ValDecoder)(ptr) + return cache[cacheKey] +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder { + ptr := atomic.LoadPointer(&cfg.encoderCache) + cache := *(*map[reflect.Type]ValEncoder)(ptr) + return cache[cacheKey] +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result 
:= stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.Froze().Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + data = data[:lastNotSpacePos(data)] + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + if iter.head == iter.tail { + iter.loadMore() + } + if iter.Error == io.EOF { + return nil + } + if iter.Error == nil { + iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal") + } + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + data = data[:lastNotSpacePos(data)] + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + typ := reflect.TypeOf(v) + if typ.Kind() != reflect.Ptr { + // return non-pointer error + return errors.New("the second param must be ptr type") + } + iter.ReadVal(v) + if iter.head == iter.tail { + iter.loadMore() + } + if iter.Error == io.EOF { + return nil + } + if iter.Error == nil { + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + } + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} diff --git a/vendor/github.com/json-iterator/go/feature_iter.go b/vendor/github.com/json-iterator/go/feature_iter.go new file mode 100644 index 00000000..4357d69b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter.go @@ -0,0 +1,307 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + 
valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. +type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + captureStartedAt int + captured []byte + Error error +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely") + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. +func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + iter.Error = fmt.Errorf("%s: %s, parsing %v ...%s... 
at %s", operation, msg, iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing %v ...|%s|... at %s", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. +func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_array.go b/vendor/github.com/json-iterator/go/feature_iter_array.go new file mode 100644 index 00000000..cbc3ec8d --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_array.go @@ -0,0 +1,58 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found: "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end") + return false + } + return true + } + return true + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found: "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_float.go b/vendor/github.com/json-iterator/go/feature_iter_float.go new file mode 100644 index 00000000..86f45991 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_float.go @@ -0,0 +1,341 @@ +package jsoniter + +import ( + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + 
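+ // the buffer ends right after a leading zero, so we cannot yet tell
+ // whether the next byte makes it invalid; let the slow path refill and decide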
return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + 
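+ // exponents ('e', 'E', '+', '-') and malformed input both land here;
+ // the strconv-based slow path sorts them out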
return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_int.go b/vendor/github.com/json-iterator/go/feature_iter_int.go new file mode 100644 index 00000000..886879ef --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_int.go @@ -0,0 +1,258 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter 
*Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) 
+ (value << 1) + uint32(ind) + } + if !iter.loadMore() { + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + return value + } + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_object.go b/vendor/github.com/json-iterator/go/feature_iter_object.go new file mode 100644 index 00000000..3bdb5576 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_object.go @@ -0,0 +1,212 @@ +package jsoniter + +import ( + "fmt" + "unicode" + "unsafe" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. 
+func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + return string(iter.readObjectFieldAsBytes()) + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {`) + return + case ',': + return string(iter.readObjectFieldAsBytes()) + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +func (iter *Iterator) readFieldHash() int32 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c == '"' { + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if 'A' <= b && b <= 'Z' { + b += 'a' - 'A' + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + } + return int32(hash) + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } + } + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 +} + +func calcHash(str string) int32 { + hash := int64(0x811c9dc5) + for _, b := range str { + hash ^= int64(unicode.ToLower(b)) + hash *= 0x1000193 + } + return int32(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.readObjectFieldAsBytes() + if !callback(iter, *(*string)(unsafe.Pointer(&field))) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.readObjectFieldAsBytes() + if !callback(iter, *(*string)(unsafe.Pointer(&field))) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadObjectCB", `expect " after }`) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n`) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field") + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field") + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadMapCB", `expect " after }`) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n`) + return false +} + +func (iter *Iterator) readObjectStart() bool { 
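+ // consume '{' (or a literal null) and report whether at least one field
+ // follows; on true, the iterator is left positioned at the first key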
+ c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n") + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field") + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip.go b/vendor/github.com/json-iterator/go/feature_iter_skip.go new file mode 100644 index 00000000..b008d98c --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip.go @@ -0,0 +1,127 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f") + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +type captureBuffer struct { + startedAt int + captured []byte +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = make([]byte, 0, 32) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + if len(captured) == 0 { + return remaining + } + captured = append(captured, remaining...) 
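+ // captured now holds every byte seen since startCapture, including
+ // bytes that were flushed out of the buffer by intermediate refills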
+ return captured +} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go new file mode 100644 index 00000000..047d58a4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go @@ -0,0 +1,144 @@ +//+build jsoniter-sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + case ']': // If close symbol, increase level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + case '}': // If close symbol, increase level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} 
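+// The skippers above trade validation for speed: instead of re-parsing each
+// element they only track nesting depth and string boundaries. The core idea
+// as a self-contained sketch (hypothetical helper, not part of this package;
+// like this sloppy build tag, it assumes well-formed input):
+//
+//	// skipValue returns the index just past the composite value starting
+//	// at buf[i], where buf[i] is '[' or '{'.
+//	func skipValue(buf []byte, i int) int {
+//		depth := 0
+//		for ; i < len(buf); i++ {
+//			switch buf[i] {
+//			case '"': // brackets inside strings must not be counted
+//				for i++; buf[i] != '"'; i++ {
+//					if buf[i] == '\\' {
+//						i++ // jump over the escaped character
+//					}
+//				}
+//			case '[', '{':
+//				depth++
+//			case ']', '}':
+//				if depth--; depth == 0 {
+//					return i + 1
+//				}
+//			}
+//		}
+//		return -1 // incomplete input
+//	}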
+ +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go new file mode 100644 index 00000000..d2676382 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go @@ -0,0 +1,89 @@ +//+build !jsoniter-sloppy + +package jsoniter + +import "fmt" + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + iter.ReadFloat32() + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_string.go 
b/vendor/github.com/json-iterator/go/feature_iter_string.go new file mode 100644 index 00000000..b7646004 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_string.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n`) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("ReadString", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("ReadString", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
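+// Illustrative use (the caller must copy before touching the iterator again;
+// ConfigDefault comes from the package's config file, not this hunk):
+//
+//	iter := ParseString(ConfigDefault, `["abc","def"]`)
+//	iter.ReadArray()
+//	b := iter.ReadStringAsSlice() // may alias iter's internal buffer
+//	key := string(b)              // copy now; the next read can clobber b
+//	iter.ReadArray()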
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadString", `expects " or n`) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f") + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. 
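+ // worked example: r = '€' (U+20AC) falls in the three-byte range
+ // (rune2Max < 0x20AC <= rune3Max), producing
+ // t3|byte(r>>12) = 0xE2, tx|byte(r>>6)&maskx = 0x82, tx|byte(r)&maskx = 0xAC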
+ switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/feature_json_number.go b/vendor/github.com/json-iterator/go/feature_json_number.go new file mode 100644 index 00000000..0439f672 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_json_number.go @@ -0,0 +1,15 @@ +package jsoniter + +import "encoding/json" + +type Number string + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} diff --git a/vendor/github.com/json-iterator/go/feature_pool.go b/vendor/github.com/json-iterator/go/feature_pool.go new file mode 100644 index 00000000..73962bc6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_pool.go @@ -0,0 +1,57 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + select { + case stream := <-cfg.streamPool: + stream.Reset(writer) + return stream + default: + return NewStream(cfg, writer, 512) + } +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.Error = nil + select { + case cfg.streamPool <- stream: + return + default: + return + } +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + select { + case iter := <-cfg.iteratorPool: + iter.ResetBytes(data) + return iter + default: + return ParseBytes(cfg, data) + } +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + select { + case cfg.iteratorPool <- iter: + return + default: + return + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect.go b/vendor/github.com/json-iterator/go/feature_reflect.go new file mode 100644 index 00000000..05d91b49 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect.go @@ -0,0 +1,691 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "fmt" + "reflect" + "time" + "unsafe" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. 
assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). +type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) + EncodeInterface(val interface{}, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +// WriteToStream the default implementation for TypeEncoder method EncodeInterface +func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) { + e := (*emptyInterface)(unsafe.Pointer(&val)) + if e.word == nil { + stream.WriteNil() + return + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +var jsonNumberType reflect.Type +var jsoniterNumberType reflect.Type +var jsonRawMessageType reflect.Type +var jsoniterRawMessageType reflect.Type +var anyType reflect.Type +var marshalerType reflect.Type +var unmarshalerType reflect.Type +var textMarshalerType reflect.Type +var textUnmarshalerType reflect.Type + +func init() { + jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem() + jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem() + jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem() + jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem() + anyType = reflect.TypeOf((*Any)(nil)).Elem() + marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +} + +type optionalDecoder struct { + valueType reflect.Type + valueDecoder ValDecoder +} + +func (decoder *optionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + value := reflect.New(decoder.valueType) + newPtr := extractInterface(value.Interface()).word + decoder.valueDecoder.Decode(newPtr, iter) + *((*uintptr)(ptr)) = uintptr(newPtr) + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type deferenceDecoder struct { + // only to deference a pointer + valueType reflect.Type + valueDecoder ValDecoder +} + +func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + value := reflect.New(decoder.valueType) + newPtr := extractInterface(value.Interface()).word + decoder.valueDecoder.Decode(newPtr, iter) + *((*uintptr)(ptr)) = uintptr(newPtr) + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type optionalEncoder struct { + valueEncoder ValEncoder +} + +func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *optionalEncoder) 
EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *optionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if *((*unsafe.Pointer)(ptr)) == nil { + return true + } + return false +} + +type placeholderEncoder struct { + cfg *frozenConfig + cacheKey reflect.Type +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.getRealEncoder().Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.getRealEncoder().IsEmpty(ptr) +} + +func (encoder *placeholderEncoder) getRealEncoder() ValEncoder { + for i := 0; i < 30; i++ { + realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey) + _, isPlaceholder := realDecoder.(*placeholderEncoder) + if isPlaceholder { + time.Sleep(time.Second) + } else { + return realDecoder + } + } + panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey)) +} + +type placeholderDecoder struct { + cfg *frozenConfig + cacheKey reflect.Type +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + for i := 0; i < 30; i++ { + realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey) + _, isPlaceholder := realDecoder.(*placeholderDecoder) + if isPlaceholder { + time.Sleep(time.Second) + } else { + realDecoder.Decode(ptr, iter) + return + } + } + panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey)) +} + +// emptyInterface is the header for an interface{} value. +type emptyInterface struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +// emptyInterface is the header for an interface with method (not interface{}) +type nonEmptyInterface struct { + // see ../runtime/iface.go:/Itab + itab *struct { + ityp unsafe.Pointer // static interface type + typ unsafe.Pointer // dynamic concrete type + link unsafe.Pointer + bad int32 + unused int32 + fun [100000]unsafe.Pointer // method table + } + word unsafe.Pointer +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + typ := reflect.TypeOf(obj) + cacheKey := typ.Elem() + decoder, err := decoderOfType(iter.cfg, cacheKey) + if err != nil { + iter.Error = err + return + } + e := (*emptyInterface)(unsafe.Pointer(&obj)) + decoder.Decode(e.word, iter) +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + typ := reflect.TypeOf(val) + cacheKey := typ + encoder, err := encoderOfType(stream.cfg, cacheKey) + if err != nil { + stream.Error = err + return + } + encoder.EncodeInterface(val, stream) +} + +type prefix string + +func (p prefix) addToDecoder(decoder ValDecoder, err error) (ValDecoder, error) { + if err != nil { + return nil, fmt.Errorf("%s: %s", p, err.Error()) + } + return decoder, err +} + +func (p prefix) addToEncoder(encoder ValEncoder, err error) (ValEncoder, error) { + if err != nil { + return nil, fmt.Errorf("%s: %s", p, err.Error()) + } + return encoder, err +} + +func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + cacheKey := typ + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder, nil + } + decoder = getTypeDecoderFromExtension(typ) + if decoder != nil { + 
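+ // a registered extension supplied this decoder; cache it so the
+ // reflection path is skipped on subsequent lookups for this type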
cfg.addDecoderToCache(cacheKey, decoder) + return decoder, nil + } + decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey} + cfg.addDecoderToCache(cacheKey, decoder) + decoder, err := createDecoderOfType(cfg, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + cfg.addDecoderToCache(cacheKey, decoder) + return decoder, err +} + +func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + typeName := typ.String() + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{}, nil + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{}, nil + } + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{}, nil + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{}, nil + } + if typ.Implements(unmarshalerType) { + templateInterface := reflect.New(typ).Elem().Interface() + var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} + if typ.Kind() == reflect.Ptr { + decoder = &optionalDecoder{typ.Elem(), decoder} + } + return decoder, nil + } + if reflect.PtrTo(typ).Implements(unmarshalerType) { + templateInterface := reflect.New(typ).Interface() + var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} + return decoder, nil + } + if typ.Implements(textUnmarshalerType) { + templateInterface := reflect.New(typ).Elem().Interface() + var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} + if typ.Kind() == reflect.Ptr { + decoder = &optionalDecoder{typ.Elem(), decoder} + } + return decoder, nil + } + if reflect.PtrTo(typ).Implements(textUnmarshalerType) { + templateInterface := reflect.New(typ).Interface() + var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} + return decoder, nil + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + sliceDecoder, err := prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) + if err != nil { + return nil, err + } + return &base64Codec{sliceDecoder: sliceDecoder}, nil + } + if typ.Implements(anyType) { + return &anyCodec{}, nil + } + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) + } + return &stringCodec{}, nil + case reflect.Int: + if typeName != "int" { + return decoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) + } + return &intCodec{}, nil + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) + } + return &int8Codec{}, nil + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) + } + return &int16Codec{}, nil + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) + } + return &int32Codec{}, nil + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) + } + return &int64Codec{}, nil + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) + } + return &uintCodec{}, nil + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) + } + return &uint8Codec{}, nil + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) + } + return &uint16Codec{}, nil + case reflect.Uint32: + if typeName != "uint32" 
{ + return decoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) + } + return &uint32Codec{}, nil + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) + } + return &uintptrCodec{}, nil + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) + } + return &uint64Codec{}, nil + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) + } + return &float32Codec{}, nil + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) + } + return &float64Codec{}, nil + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) + } + return &boolCodec{}, nil + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{}, nil + } + return &nonEmptyInterfaceCodec{}, nil + case reflect.Struct: + return prefix(fmt.Sprintf("[%s]", typeName)).addToDecoder(decoderOfStruct(cfg, typ)) + case reflect.Array: + return prefix("[array]").addToDecoder(decoderOfArray(cfg, typ)) + case reflect.Slice: + return prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) + case reflect.Map: + return prefix("[map]").addToDecoder(decoderOfMap(cfg, typ)) + case reflect.Ptr: + return prefix("[optional]").addToDecoder(decoderOfOptional(cfg, typ)) + default: + return nil, fmt.Errorf("unsupported type: %v", typ) + } +} + +func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + cacheKey := typ + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder, nil + } + encoder = getTypeEncoderFromExtension(typ) + if encoder != nil { + cfg.addEncoderToCache(cacheKey, encoder) + return encoder, nil + } + encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey} + cfg.addEncoderToCache(cacheKey, encoder) + encoder, err := createEncoderOfType(cfg, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder, err +} + +func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{}, nil + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{}, nil + } + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{}, nil + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{}, nil + } + if typ.Implements(marshalerType) { + checkIsEmpty, err := createCheckIsEmpty(typ) + if err != nil { + return nil, err + } + templateInterface := reflect.New(typ).Elem().Interface() + var encoder ValEncoder = &marshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + if typ.Kind() == reflect.Ptr { + encoder = &optionalEncoder{encoder} + } + return encoder, nil + } + if typ.Implements(textMarshalerType) { + checkIsEmpty, err := createCheckIsEmpty(typ) + if err != nil { + return nil, err + } + templateInterface := reflect.New(typ).Elem().Interface() + var encoder ValEncoder = &textMarshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + if typ.Kind() == reflect.Ptr { + encoder = &optionalEncoder{encoder} + } + return encoder, nil + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + return &base64Codec{}, nil + } + if 
typ.Implements(anyType) { + return &anyCodec{}, nil + } + return createEncoderOfSimpleType(cfg, typ) +} + +func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) { + kind := typ.Kind() + switch kind { + case reflect.String: + return &stringCodec{}, nil + case reflect.Int: + return &intCodec{}, nil + case reflect.Int8: + return &int8Codec{}, nil + case reflect.Int16: + return &int16Codec{}, nil + case reflect.Int32: + return &int32Codec{}, nil + case reflect.Int64: + return &int64Codec{}, nil + case reflect.Uint: + return &uintCodec{}, nil + case reflect.Uint8: + return &uint8Codec{}, nil + case reflect.Uint16: + return &uint16Codec{}, nil + case reflect.Uint32: + return &uint32Codec{}, nil + case reflect.Uintptr: + return &uintptrCodec{}, nil + case reflect.Uint64: + return &uint64Codec{}, nil + case reflect.Float32: + return &float32Codec{}, nil + case reflect.Float64: + return &float64Codec{}, nil + case reflect.Bool: + return &boolCodec{}, nil + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{}, nil + } + return &nonEmptyInterfaceCodec{}, nil + case reflect.Struct: + return &structEncoder{}, nil + case reflect.Array: + return &arrayEncoder{}, nil + case reflect.Slice: + return &sliceEncoder{}, nil + case reflect.Map: + return &mapEncoder{}, nil + case reflect.Ptr: + return &optionalEncoder{}, nil + default: + return nil, fmt.Errorf("unsupported type: %v", typ) + } +} + +func createEncoderOfSimpleType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) + } + return &stringCodec{}, nil + case reflect.Int: + if typeName != "int" { + return encoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) + } + return &intCodec{}, nil + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) + } + return &int8Codec{}, nil + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) + } + return &int16Codec{}, nil + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) + } + return &int32Codec{}, nil + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) + } + return &int64Codec{}, nil + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) + } + return &uintCodec{}, nil + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) + } + return &uint8Codec{}, nil + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) + } + return &uint16Codec{}, nil + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) + } + return &uint32Codec{}, nil + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) + } + return &uintptrCodec{}, nil + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) + } + return &uint64Codec{}, nil + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) + } + return &float32Codec{}, nil + case reflect.Float64: + if 
typeName != "float64" { + return encoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) + } + return &float64Codec{}, nil + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) + } + return &boolCodec{}, nil + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{}, nil + } + return &nonEmptyInterfaceCodec{}, nil + case reflect.Struct: + return prefix(fmt.Sprintf("[%s]", typeName)).addToEncoder(encoderOfStruct(cfg, typ)) + case reflect.Array: + return prefix("[array]").addToEncoder(encoderOfArray(cfg, typ)) + case reflect.Slice: + return prefix("[slice]").addToEncoder(encoderOfSlice(cfg, typ)) + case reflect.Map: + return prefix("[map]").addToEncoder(encoderOfMap(cfg, typ)) + case reflect.Ptr: + return prefix("[optional]").addToEncoder(encoderOfOptional(cfg, typ)) + default: + return nil, fmt.Errorf("unsupported type: %v", typ) + } +} + +func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + elemType := typ.Elem() + decoder, err := decoderOfType(cfg, elemType) + if err != nil { + return nil, err + } + return &optionalDecoder{elemType, decoder}, nil +} + +func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + elemType := typ.Elem() + elemEncoder, err := encoderOfType(cfg, elemType) + if err != nil { + return nil, err + } + encoder := &optionalEncoder{elemEncoder} + if elemType.Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + return encoder, nil +} + +func decoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + decoder, err := decoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + mapInterface := reflect.New(typ).Interface() + return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)}, nil +} + +func extractInterface(val interface{}) emptyInterface { + return *((*emptyInterface)(unsafe.Pointer(&val))) +} + +func encoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + elemType := typ.Elem() + encoder, err := encoderOfType(cfg, elemType) + if err != nil { + return nil, err + } + mapInterface := reflect.New(typ).Elem().Interface() + if cfg.sortMapKeys { + return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil + } + return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_array.go b/vendor/github.com/json-iterator/go/feature_reflect_array.go new file mode 100644 index 00000000..e23f187b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_array.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +func decoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + decoder, err := decoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + return &arrayDecoder{typ, typ.Elem(), decoder}, nil +} + +func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + encoder, err := encoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + if typ.Elem().Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + return &arrayEncoder{typ, typ.Elem(), encoder}, nil +} + +type arrayEncoder struct { + arrayType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + 
elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) + encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) { + // special optimization for interface{} + e := (*emptyInterface)(unsafe.Pointer(&val)) + if e.word == nil { + stream.WriteArrayStart() + stream.WriteNil() + stream.WriteArrayEnd() + return + } + elemType := encoder.arrayType.Elem() + if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) { + ptr := uintptr(e.word) + e.word = unsafe.Pointer(&ptr) + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + offset := uintptr(0) + iter.ReadArrayCB(func(iter *Iterator) bool { + if offset < decoder.arrayType.Size() { + decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter) + offset += decoder.elemType.Size() + } else { + iter.Skip() + } + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_extension.go b/vendor/github.com/json-iterator/go/feature_reflect_extension.go new file mode 100644 index 00000000..3dd38299 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_extension.go @@ -0,0 +1,413 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + onePtrEmbedded bool + onePtrOptimization bool + Type reflect.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field *reflect.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
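
Reviewer's note on the `Extension` SPI the vendored code defines next: a minimal sketch of a custom extension, assuming only the exported `DummyExtension`, `StructDescriptor`, `Binding`, and `RegisterExtension` from this file. The `lowercaseExtension` name and its renaming policy are hypothetical, purely for illustration.

```
package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

// lowercaseExtension (hypothetical) renames every struct field to its
// lowercased Go name by mutating the StructDescriptor. Embedding
// DummyExtension supplies no-op implementations of the other methods.
type lowercaseExtension struct {
	jsoniter.DummyExtension
}

func (ext *lowercaseExtension) UpdateStructDescriptor(sd *jsoniter.StructDescriptor) {
	for _, binding := range sd.Fields {
		name := strings.ToLower(binding.Field.Name)
		binding.ToNames = []string{name}   // names used when encoding
		binding.FromNames = []string{name} // names accepted when decoding
	}
}

func main() {
	jsoniter.RegisterExtension(&lowercaseExtension{})
	out, _ := jsoniter.Marshal(struct{ UserName string }{"alice"})
	fmt.Println(string(out)) // {"username":"alice"}
}
```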
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateDecoder(typ reflect.Type) ValDecoder + CreateEncoder(typ reflect.Type) ValEncoder + DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + 
fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder := typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + decoder := typeDecoders[typ.Elem().String()] + if decoder != nil { + return &optionalDecoder{typ.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder := typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + encoder := typeEncoders[typ.Elem().String()] + if encoder != nil { + return &optionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) { + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + tag := field.Tag.Get(cfg.getTagKey()) + tagParts := strings.Split(tag, ",") + if tag == "-" { + continue + } + if field.Anonymous && (tag == "" || tagParts[0] == "") { + if field.Type.Kind() == reflect.Struct { + structDescriptor, err := describeStruct(cfg, field.Type) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { + structDescriptor, err := describeStruct(cfg, field.Type.Elem()) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
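
The `Register*` entry points above key custom decoders/encoders by the type's `reflect.Type.String()` name, and the extension lookup runs before the built-in codecs are consulted. A minimal sketch using the `RegisterTypeEncoderFunc` signature defined in this file; encoding `time.Time` as a Unix timestamp is a hypothetical policy chosen for illustration.

```
package main

import (
	"fmt"
	"time"
	"unsafe"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Register an encoder under the type name "time.Time". The second
	// function reports emptiness for omitempty handling.
	jsoniter.RegisterTypeEncoderFunc("time.Time",
		func(ptr unsafe.Pointer, stream *jsoniter.Stream) {
			t := *(*time.Time)(ptr)
			stream.WriteInt64(t.Unix())
		},
		func(ptr unsafe.Pointer) bool {
			return (*(*time.Time)(ptr)).IsZero()
		})

	out, _ := jsoniter.Marshal(struct{ Created time.Time }{time.Unix(1500000000, 0)})
	fmt.Println(string(out)) // {"Created":1500000000}
}
```

Registration must happen before the first marshal of the type, since the resulting encoder is cached per config.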
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &optionalEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} + binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + fieldNames := calcFieldNames(field.Name, tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + var err error + decoder, err = decoderOfType(cfg, field.Type) + if err != nil { + return nil, err + } + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + var err error + encoder, err = encoderOfType(cfg, field.Type) + if err != nil { + return nil, err + } + // map is stored as pointer in the struct + if field.Type.Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + } + binding := &Binding{ + Field: &field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(cfg, typ, bindings, embeddedBindings), nil +} +func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + onePtrEmbedded := false + onePtrOptimization := false + if typ.NumField() == 1 { + firstField := typ.Field(0) + switch firstField.Type.Kind() { + case reflect.Ptr: + if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct { + onePtrEmbedded = true + } + fallthrough + case reflect.Map: + onePtrOptimization = true + case reflect.Struct: + onePtrOptimization = isStructOnePtr(firstField.Type) + } + } + structDescriptor := &StructDescriptor{ + onePtrEmbedded: onePtrEmbedded, + onePtrOptimization: onePtrOptimization, + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, cfg) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +func isStructOnePtr(typ reflect.Type) bool { + if typ.NumField() == 1 { + firstField := typ.Field(0) + switch firstField.Type.Kind() { + case reflect.Ptr: + return true + case reflect.Map: + return true + case reflect.Struct: + return isStructOnePtr(firstField.Type) + } + } + return false +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type.Kind() == reflect.String { + 
binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? + var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_map.go b/vendor/github.com/json-iterator/go/feature_reflect_map.go new file mode 100644 index 00000000..005671e0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_map.go @@ -0,0 +1,244 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "reflect" + "sort" + "strconv" + "unsafe" +) + +type mapDecoder struct { + mapType reflect.Type + keyType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder + mapInterface emptyInterface +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + // dark magic to cast unsafe.Pointer back to interface{} using reflect.Type + mapInterface := decoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface).Elem() + if iter.ReadNil() { + realVal.Set(reflect.Zero(decoder.mapType)) + return + } + if realVal.IsNil() { + realVal.Set(reflect.MakeMap(realVal.Type())) + } + iter.ReadMapCB(func(iter *Iterator, keyStr string) bool { + elem := reflect.New(decoder.elemType) + decoder.elemDecoder.Decode(unsafe.Pointer(elem.Pointer()), iter) + // to put into map, we have to use reflection + keyType := decoder.keyType + // TODO: remove this from loop + switch { + case keyType.Kind() == reflect.String: + realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem()) + return true + case keyType.Implements(textUnmarshalerType): + textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler) + err := textUnmarshaler.UnmarshalText([]byte(keyStr)) + if err != nil { + iter.ReportError("read map key as TextUnmarshaler", err.Error()) + return false + } + realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem()) + return true + case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler) + err := textUnmarshaler.UnmarshalText([]byte(keyStr)) + if err != nil { + iter.ReportError("read map key as TextUnmarshaler", err.Error()) + return false + } + realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem()) + return true + default: + switch keyType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(keyStr, 10, 64) + if err != nil || reflect.Zero(keyType).OverflowInt(n) { + iter.ReportError("read map key as int64", "read int64 failed") + return false + } + realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) + return true + case 
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(keyStr, 10, 64) + if err != nil || reflect.Zero(keyType).OverflowUint(n) { + iter.ReportError("read map key as uint64", "read uint64 failed") + return false + } + realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) + return true + } + } + iter.ReportError("read map key", "unexpected map key type "+keyType.String()) + return true + }) +} + +type mapEncoder struct { + mapType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder + mapInterface emptyInterface +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + stream.WriteObjectStart() + for i, key := range realVal.MapKeys() { + if i != 0 { + stream.WriteMore() + } + encodeMapKey(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + val := realVal.MapIndex(key).Interface() + encoder.elemEncoder.EncodeInterface(val, stream) + } + stream.WriteObjectEnd() +} + +func encodeMapKey(key reflect.Value, stream *Stream) { + if key.Kind() == reflect.String { + stream.WriteString(key.String()) + return + } + if tm, ok := key.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + if err != nil { + stream.Error = err + return + } + stream.writeByte('"') + stream.Write(buf) + stream.writeByte('"') + return + } + switch key.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + stream.writeByte('"') + stream.WriteInt64(key.Int()) + stream.writeByte('"') + return + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + stream.writeByte('"') + stream.WriteUint64(key.Uint()) + stream.writeByte('"') + return + } + stream.Error = &json.UnsupportedTypeError{Type: key.Type()} +} + +func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + return realVal.Len() == 0 +} + +type sortKeysMapEncoder struct { + mapType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder + mapInterface emptyInterface +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + + // Extract and sort the keys. 
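
Before the key-sorting loop continues below, a usage note: `encodeMapKey` above quotes integer keys, and `sortKeysMapEncoder` emits keys in sorted string order. A minimal sketch, assuming the exported `ConfigCompatibleWithStandardLibrary`, which turns on `sortMapKeys` to match encoding/json.

```
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Integer map keys are quoted and emitted in sorted order,
	// mirroring encoding/json's behaviour.
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	out, _ := json.Marshal(map[int]string{3: "c", 1: "a", 2: "b"})
	fmt.Println(string(out)) // {"1":"a","2":"b","3":"c"}
}
```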
+ keys := realVal.MapKeys() + sv := stringValues(make([]reflectWithString, len(keys))) + for i, v := range keys { + sv[i].v = v + if err := sv[i].resolve(); err != nil { + stream.Error = err + return + } + } + sort.Sort(sv) + + stream.WriteObjectStart() + for i, key := range sv { + if i != 0 { + stream.WriteMore() + } + stream.WriteVal(key.s) // might need html escape, so can not WriteString directly + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + val := realVal.MapIndex(key.v).Interface() + encoder.elemEncoder.EncodeInterface(val, stream) + } + stream.WriteObjectEnd() +} + +// stringValues is a slice of reflect.Value holding *reflect.StringValue. +// It implements the methods to sort by string. +type stringValues []reflectWithString + +type reflectWithString struct { + v reflect.Value + s string +} + +func (w *reflectWithString) resolve() error { + if w.v.Kind() == reflect.String { + w.s = w.v.String() + return nil + } + if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + w.s = string(buf) + return err + } + switch w.v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + w.s = strconv.FormatInt(w.v.Int(), 10) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + w.s = strconv.FormatUint(w.v.Uint(), 10) + return nil + } + return &json.UnsupportedTypeError{Type: w.v.Type()} +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s } + +func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + return realVal.Len() == 0 +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_native.go b/vendor/github.com/json-iterator/go/feature_reflect_native.go new file mode 100644 index 00000000..b37dab3d --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_native.go @@ -0,0 +1,672 @@ +package jsoniter + +import ( + "encoding" + "encoding/base64" + "encoding/json" + "unsafe" +) + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type intCodec struct { +} + +func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int)(ptr)) = iter.ReadInt() +} + +func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt(*((*int)(ptr))) +} + +func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int)(ptr)) == 0 +} + +type uintptrCodec struct { +} + +func (codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + 
*((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) +} + +func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(uint64(*((*uintptr)(ptr)))) +} + +func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uintptr)(ptr)) == 0 +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int8)(ptr)) = iter.ReadInt8() +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int16)(ptr)) = iter.ReadInt16() +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int32)(ptr)) = iter.ReadInt32() +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int64)(ptr)) = iter.ReadInt64() +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uintCodec struct { +} + +func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint)(ptr)) = iter.ReadUint() +} + +func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint(*((*uint)(ptr))) +} + +func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint8)(ptr)) = iter.ReadUint8() +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint16)(ptr)) = iter.ReadUint16() +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) EncodeInterface(val interface{}, 
stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint32)(ptr)) = iter.ReadUint32() +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint64)(ptr)) = iter.ReadUint64() +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*float32)(ptr)) = iter.ReadFloat32() +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*float64)(ptr)) = iter.ReadFloat64() +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*bool)(ptr)) = iter.ReadBool() +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type emptyInterfaceCodec struct { +} + +func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*interface{})(ptr)) = iter.Read() +} + +func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteVal(*((*interface{})(ptr))) +} + +func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteVal(val) +} + +func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { + return ptr == nil +} + +type nonEmptyInterfaceCodec struct { +} + +func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + if nonEmptyInterface.itab == nil { + iter.ReportError("read non-empty interface", "do not know which concrete type to decode to") + return + } + var i interface{} + e := (*emptyInterface)(unsafe.Pointer(&i)) + e.typ = nonEmptyInterface.itab.typ + e.word = nonEmptyInterface.word + iter.ReadVal(&i) + 
nonEmptyInterface.word = e.word +} + +func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + var i interface{} + e := (*emptyInterface)(unsafe.Pointer(&i)) + e.typ = nonEmptyInterface.itab.typ + e.word = nonEmptyInterface.word + stream.WriteVal(i) +} + +func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteVal(val) +} + +func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + return nonEmptyInterface.word == nil +} + +type anyCodec struct { +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*Any)(ptr)) = iter.ReadAny() +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + (*((*Any)(ptr))).WriteTo(stream) +} + +func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) { + (val.(Any)).WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + return (*((*Any)(ptr))).Size() == 0 +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.Number)(ptr)))) +} + +func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(json.Number))) +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*Number)(ptr)))) +} + +func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(Number))) +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(json.RawMessage))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(RawMessage))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} + +type base64Codec struct { + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + 
ptrSlice := (*sliceHeader)(ptr) + ptrSlice.Len = 0 + ptrSlice.Cap = 0 + ptrSlice.Data = nil + return + } + switch iter.WhatIsNext() { + case StringValue: + encoding := base64.StdEncoding + src := iter.SkipAndReturnBytes() + src = src[1 : len(src)-1] + decodedLen := encoding.DecodedLen(len(src)) + dst := make([]byte, decodedLen) + len, err := encoding.Decode(dst, src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + dst = dst[:len] + dstSlice := (*sliceHeader)(unsafe.Pointer(&dst)) + ptrSlice := (*sliceHeader)(ptr) + ptrSlice.Data = dstSlice.Data + ptrSlice.Cap = dstSlice.Cap + ptrSlice.Len = dstSlice.Len + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + toGrow := encoding.EncodedLen(len(src)) + stream.ensure(toGrow) + encoding.Encode(stream.buf[stream.n:], src) + stream.n += toGrow + stream.writeByte('"') +} + +func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) { + ptr := extractInterface(val).word + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + toGrow := encoding.EncodedLen(len(src)) + stream.ensure(toGrow) + encoding.Encode(stream.buf[stream.n:], src) + stream.n += toGrow + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect "`) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect "`) + return + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder 
*stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type marshalerEncoder struct { + templateInterface emptyInterface + checkIsEmpty checkIsEmpty +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + templateInterface := encoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + marshaler := (*realInterface).(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} +func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + templateInterface emptyInterface + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + templateInterface := encoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + marshaler := (*realInterface).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + stream.WriteString(string(bytes)) + } +} + +func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + templateInterface emptyInterface +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + templateInterface := decoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + unmarshaler := (*realInterface).(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + templateInterface emptyInterface +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + templateInterface := decoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + unmarshaler := (*realInterface).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_object.go b/vendor/github.com/json-iterator/go/feature_reflect_object.go new file mode 100644 index 00000000..59b1235c --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_object.go @@ -0,0 +1,196 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "strings" + "unsafe" +) + +func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor, err := describeStruct(cfg, typ) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + 
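
Stepping back to the marshaler codecs defined above: they are what make jsoniter honour the standard `json.Marshaler`/`json.Unmarshaler` and `encoding.TextMarshaler`/`TextUnmarshaler` interfaces. A minimal round-trip sketch; the `Celsius` type and its text format are hypothetical.

```
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Celsius (hypothetical) implements TextMarshaler/TextUnmarshaler,
// so jsoniter serializes it as a quoted string, like encoding/json.
type Celsius float64

func (c Celsius) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("%.1fC", float64(c))), nil
}

func (c *Celsius) UnmarshalText(b []byte) error {
	_, err := fmt.Sscanf(string(b), "%fC", (*float64)(c))
	return err
}

func main() {
	out, _ := jsoniter.Marshal(Celsius(21.5))
	fmt.Println(string(out)) // "21.5C"

	var c Celsius
	_ = jsoniter.Unmarshal([]byte(`"21.5C"`), &c)
	fmt.Println(c) // 21.5
}
```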
for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{}, nil + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{structDescriptor.onePtrEmbedded, structDescriptor.onePtrOptimization, finalOrderedFields}, nil +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag.Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +func decoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + bindings := map[string]*Binding{} + structDescriptor, err := describeStruct(cfg, typ) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + return createStructDecoder(typ, fields) +} + +type structFieldEncoder struct { + field *reflect.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +type structEncoder struct { + onePtrEmbedded bool + onePtrOptimization bool + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() +} + +func (encoder 
*structEncoder) EncodeInterface(val interface{}, stream *Stream) { + e := (*emptyInterface)(unsafe.Pointer(&val)) + if encoder.onePtrOptimization { + if e.word == nil && encoder.onePtrEmbedded { + stream.WriteObjectStart() + stream.WriteObjectEnd() + return + } + ptr := uintptr(e.word) + e.word = unsafe.Pointer(&ptr) + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_slice.go b/vendor/github.com/json-iterator/go/feature_reflect_slice.go new file mode 100644 index 00000000..7377eec7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_slice.go @@ -0,0 +1,149 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +func decoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + decoder, err := decoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + return &sliceDecoder{typ, typ.Elem(), decoder}, nil +} + +func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + encoder, err := encoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + if typ.Elem().Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + return &sliceEncoder{typ, typ.Elem(), encoder}, nil +} + +type sliceEncoder struct { + sliceType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + slice := (*sliceHeader)(ptr) + if slice.Data == nil { + stream.WriteNil() + return + } + if slice.Len == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(slice.Data) + encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) + for i := 1; i < slice.Len; i++ { + stream.WriteMore() + elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) + encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + slice := (*sliceHeader)(ptr) + return slice.Len == 0 +} + +type sliceDecoder struct { + sliceType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder +} + +// sliceHeader is a safe version of SliceHeader used within this package. 
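
One observable consequence of `sliceEncoder.Encode` above, shown as a minimal sketch: a nil slice (`slice.Data == nil`) serializes as `null`, while an allocated empty slice serializes as `[]`, matching encoding/json.

```
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var nilSlice []int // nil backing pointer -> null
	a, _ := jsoniter.Marshal(nilSlice)
	b, _ := jsoniter.Marshal([]int{}) // allocated but empty -> []
	fmt.Println(string(a), string(b)) // null []
}
```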
+type sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + slice := (*sliceHeader)(ptr) + if iter.ReadNil() { + slice.Len = 0 + slice.Cap = 0 + slice.Data = nil + return + } + reuseSlice(slice, decoder.sliceType, 4) + slice.Len = 0 + offset := uintptr(0) + iter.ReadArrayCB(func(iter *Iterator) bool { + growOne(slice, decoder.sliceType, decoder.elemType) + decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter) + offset += decoder.elemType.Size() + return true + }) +} + +// grow grows the slice s so that it can hold extra more values, allocating +// more capacity if needed. It also returns the old and new slice lengths. +func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) { + newLen := slice.Len + 1 + if newLen <= slice.Cap { + slice.Len = newLen + return + } + newCap := slice.Cap + if newCap == 0 { + newCap = 1 + } else { + for newCap < newLen { + if slice.Len < 1024 { + newCap += newCap + } else { + newCap += newCap / 4 + } + } + } + newVal := reflect.MakeSlice(sliceType, newLen, newCap) + dst := unsafe.Pointer(newVal.Pointer()) + // copy old array into new array + originalBytesCount := uintptr(slice.Len) * elementType.Size() + srcPtr := (*[1 << 30]byte)(slice.Data) + dstPtr := (*[1 << 30]byte)(dst) + for i := uintptr(0); i < originalBytesCount; i++ { + dstPtr[i] = srcPtr[i] + } + slice.Data = dst + slice.Len = newLen + slice.Cap = newCap +} + +func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) { + if expectedCap <= slice.Cap { + return + } + newVal := reflect.MakeSlice(sliceType, 0, expectedCap) + dst := unsafe.Pointer(newVal.Pointer()) + slice.Data = dst + slice.Cap = expectedCap +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go new file mode 100644 index 00000000..b3417fd7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go @@ -0,0 +1,916 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "strings" + "unsafe" +) + +func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) (ValDecoder, error) { + knownHash := map[int32]struct{}{ + 0: {}, + } + switch len(fields) { + case 0: + return &skipObjectDecoder{typ}, nil + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}, nil + } + case 2: + var fieldHash1 int32 + var fieldHash2 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}, nil + case 
3: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3}, nil + case 4: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4}, nil + case 5: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5}, nil + case 6: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + 
fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6}, nil + case 7: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7}, nil + case 8: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } + 
} + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, fieldName8, fieldDecoder8}, nil + case 9: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldName9 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9}, nil + case 10: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldName9 int32 + var fieldName10 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + 
fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10}, nil + } + return &generalStructDecoder{typ, fields}, nil +} + +type generalStructDecoder struct { + typ reflect.Type + fields map[string]*structFieldDecoder +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + fieldBytes := iter.readObjectFieldAsBytes() + field := *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder := decoder.fields[strings.ToLower(field)] + if fieldDecoder == nil { + iter.Skip() + } else { + fieldDecoder.Decode(ptr, iter) + } + for iter.nextToken() == ',' { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[strings.ToLower(field)] + if fieldDecoder == nil { + iter.Skip() + } else { + fieldDecoder.Decode(ptr, iter) + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type skipObjectDecoder struct { + typ reflect.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect.Type + fieldHash int32 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type twoFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type threeFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case 
decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type fourFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type fiveFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type sixFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type sevenFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 
int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type eightFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder + fieldHash8 int32 + fieldDecoder8 *structFieldDecoder +} + +func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type nineFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder + fieldHash8 int32 + fieldDecoder8 *structFieldDecoder + fieldHash9 int32 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + 
decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type tenFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder + fieldHash8 int32 + fieldDecoder8 *structFieldDecoder + fieldHash9 int32 + fieldDecoder9 *structFieldDecoder + fieldHash10 int32 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type structFieldDecoder struct { + field *reflect.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/feature_stream.go b/vendor/github.com/json-iterator/go/feature_stream.go new file mode 100644 index 00000000..9c8470a0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream.go @@ -0,0 +1,305 @@ +package jsoniter + +import ( + "io" +) + +// Stream is an io.Writer-like object, with JSON-specific write functions. +// Errors are not returned as return values, but stored in the Error member of this stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + n int + Error error + indention int +} + +// NewStream creates a new Stream instance. +// cfg can be jsoniter.ConfigDefault. +// out can be nil to write to the internal buffer. +// bufSize is the initial size for the internal buffer in bytes.
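+// +// A minimal usage sketch (the writer and buffer size here are illustrative choices, not requirements): +// +// stream := NewStream(ConfigDefault, os.Stdout, 512) +// stream.WriteObjectStart() +// stream.WriteObjectField("hello") +// stream.WriteString("world") +// stream.WriteObjectEnd() +// stream.Flush()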
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, bufSize), + n: 0, + Error: nil, + indention: 0, + } +} + +// Pool returns a pool that can provide more streams with the same configuration. +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuses this stream instance by assigning a new writer. +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.n = 0 +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return len(stream.buf) - stream.n +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return stream.n +} + +// Buffer returns the contents of the internal buffer; if the writer is nil, use this method to take the result. +func (stream *Stream) Buffer() []byte { + return stream.buf[:stream.n] +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + for len(p) > stream.Available() && stream.Error == nil { + if stream.out == nil { + stream.growAtLeast(len(p)) + } else { + var n int + if stream.Buffered() == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, stream.Error = stream.out.Write(p) + } else { + n = copy(stream.buf[stream.n:], p) + stream.n += n + stream.Flush() + } + nn += n + p = p[n:] + } + } + if stream.Error != nil { + return nn, stream.Error + } + n := copy(stream.buf[stream.n:], p) + stream.n += n + nn += n + return nn, nil +} + +// writeByte writes a single byte. +func (stream *Stream) writeByte(c byte) { + if stream.Error != nil { + return + } + if stream.Available() < 1 { + stream.growAtLeast(1) + } + stream.buf[stream.n] = c + stream.n++ +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 2 { + stream.growAtLeast(2) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.n += 2 +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 3 { + stream.growAtLeast(3) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.buf[stream.n+2] = c3 + stream.n += 3 +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 4 { + stream.growAtLeast(4) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.buf[stream.n+2] = c3 + stream.buf[stream.n+3] = c4 + stream.n += 4 +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 5 { + stream.growAtLeast(5) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.buf[stream.n+2] = c3 + stream.buf[stream.n+3] = c4 + stream.buf[stream.n+4] = c5 + stream.n += 5 +} + +// Flush writes any buffered data to the underlying io.Writer.
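+// On a short or failed write, the unwritten bytes are kept at the front of the +// buffer and the error is stored in stream.Error as well as returned.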
+func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + if stream.n == 0 { + return nil + } + n, err := stream.out.Write(stream.buf[0:stream.n]) + if n < stream.n && err == nil { + err = io.ErrShortWrite + } + if err != nil { + if n > 0 && n < stream.n { + copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n]) + } + stream.n -= n + stream.Error = err + return err + } + stream.n = 0 + return nil +} + +func (stream *Stream) ensure(minimal int) { + available := stream.Available() + if available < minimal { + stream.growAtLeast(minimal) + } +} + +func (stream *Stream) growAtLeast(minimal int) { + if stream.out != nil { + stream.Flush() + } + toGrow := len(stream.buf) + if toGrow < minimal { + toGrow = minimal + } + newBuf := make([]byte, len(stream.buf)+toGrow) + copy(newBuf, stream.Buffer()) + stream.buf = newBuf +} + +// WriteRaw writes the string out without quotes, just like raw []byte. +func (stream *Stream) WriteRaw(s string) { + stream.ensure(len(s)) + if stream.Error != nil { + return + } + n := copy(stream.buf[stream.n:], s) + stream.n += n +} + +// WriteNil writes null to the stream. +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue writes true to the stream. +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse writes false to the stream. +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool writes true or false into the stream. +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart writes { with possible indention. +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField writes "field": with possible indention. +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd writes } with possible indention. +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject writes {}. +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore writes , with possible indention. +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) +} + +// WriteArrayStart writes [ with possible indention. +func (stream *Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray writes []. +func (stream *Stream) WriteEmptyArray() { + stream.writeByte('[') + stream.writeByte(']') +} + +// WriteArrayEnd writes ] with possible indention. +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + stream.ensure(toWrite) + for i := 0; i < toWrite && stream.n < len(stream.buf); i++ { + stream.buf[stream.n] = ' ' + stream.n++ + } +}
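+ +// A note on indention (assuming this package's Config API, e.g. +// Config{IndentionStep: 2}.Froze()): a non-zero indention step makes the stream +// emit pretty-printed JSON, while ConfigDefault keeps indention at 0 and emits +// compact JSON.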
diff --git a/vendor/github.com/json-iterator/go/feature_stream_float.go b/vendor/github.com/json-iterator/go/feature_stream_float.go new file mode 100644 index 00000000..9a404e11 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream_float.go @@ -0,0 +1,96 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 writes float32 to the stream. +func (stream *Stream) WriteFloat32(val float32) { + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32)) +} + +// WriteFloat32Lossy writes float32 to the stream with only 6 digits of precision; it is much faster than WriteFloat32. +func (stream *Stream) WriteFloat32Lossy(val float32) { + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + stream.ensure(10) + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[stream.n-1] == '0' { + stream.n-- + } +} + +// WriteFloat64 writes float64 to the stream. +func (stream *Stream) WriteFloat64(val float64) { + abs := math.Abs(val) + fmt := byte('f') + // Note: Must use float64 comparisons for the underlying float64 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64)) +} + +// WriteFloat64Lossy writes float64 to the stream with only 6 digits of precision; it is much faster than WriteFloat64. +func (stream *Stream) WriteFloat64Lossy(val float64) { + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + stream.ensure(10) + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[stream.n-1] == '0' { + stream.n-- + } +} diff --git a/vendor/github.com/json-iterator/go/feature_stream_int.go b/vendor/github.com/json-iterator/go/feature_stream_int.go new file mode 100644 index 00000000..7cfd522c --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream_int.go @@ -0,0 +1,320 @@ +package jsoniter + +// digits caches, for each value 0..999, the three ASCII digit bytes packed into +// the low three bytes of each entry; the top byte records how many leading +// digits writeFirstBuf should skip (2 for values below 10, 1 for values below 100). +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(buf []byte, v uint32, n int) int { + start := v >> 24 + if start == 0 { + buf[n] = byte(v >> 16) + n++ + buf[n] = byte(v >> 8) + n++ + } else if start == 1 { + buf[n] = byte(v >> 8) + n++ + } + buf[n] = byte(v) + n++ + return n +} + +func writeBuf(buf []byte, v uint32, n int) { + buf[n] = byte(v >> 16) + buf[n+1] = byte(v >> 8) + buf[n+2] = byte(v) +} + +// WriteUint8 write uint8 to stream +func (stream *Stream) WriteUint8(val uint8) { + stream.ensure(3) + stream.n =
writeFirstBuf(stream.buf, digits[val], stream.n) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + stream.ensure(4) + n := stream.n + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint8(nval) + } + stream.n = writeFirstBuf(stream.buf, digits[val], n) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + stream.ensure(5) + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) + return + } + r1 := val - q1*1000 + n := writeFirstBuf(stream.buf, digits[q1], stream.n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + stream.ensure(6) + n := stream.n + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint16(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + n = writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + stream.ensure(10) + n := stream.n + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + } else { + r3 := q2 - q3*1000 + stream.buf[n] = byte(q3 + '0') + n++ + writeBuf(stream.buf, digits[r3], n) + n += 3 + } + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 +} + +// WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + stream.ensure(11) + n := stream.n + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint32(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + } else { + r3 := q2 - q3*1000 + stream.buf[n] = byte(q3 + '0') + n++ + writeBuf(stream.buf, digits[r3], n) + n += 3 + } + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + stream.ensure(20) + n := stream.n + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + n = writeFirstBuf(stream.buf, digits[q3], n) + writeBuf(stream.buf, digits[r3], n) + writeBuf(stream.buf, digits[r2], n+3) + 
writeBuf(stream.buf, digits[r1], n+6) + stream.n = n + 9 + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + n = writeFirstBuf(stream.buf, digits[q4], n) + writeBuf(stream.buf, digits[r4], n) + writeBuf(stream.buf, digits[r3], n+3) + writeBuf(stream.buf, digits[r2], n+6) + writeBuf(stream.buf, digits[r1], n+9) + stream.n = n + 12 + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + n = writeFirstBuf(stream.buf, digits[q5], n) + } else { + n = writeFirstBuf(stream.buf, digits[q6], n) + r6 := q5 - q6*1000 + writeBuf(stream.buf, digits[r6], n) + n += 3 + } + writeBuf(stream.buf, digits[r5], n) + writeBuf(stream.buf, digits[r4], n+3) + writeBuf(stream.buf, digits[r3], n+6) + writeBuf(stream.buf, digits[r2], n+9) + writeBuf(stream.buf, digits[r1], n+12) + stream.n = n + 15 +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + stream.ensure(20) + n := stream.n + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint64(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + n = writeFirstBuf(stream.buf, digits[q3], n) + writeBuf(stream.buf, digits[r3], n) + writeBuf(stream.buf, digits[r2], n+3) + writeBuf(stream.buf, digits[r1], n+6) + stream.n = n + 9 + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + n = writeFirstBuf(stream.buf, digits[q4], n) + writeBuf(stream.buf, digits[r4], n) + writeBuf(stream.buf, digits[r3], n+3) + writeBuf(stream.buf, digits[r2], n+6) + writeBuf(stream.buf, digits[r1], n+9) + stream.n = n + 12 + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + n = writeFirstBuf(stream.buf, digits[q5], n) + } else { + stream.buf[n] = byte(q6 + '0') + n++ + r6 := q5 - q6*1000 + writeBuf(stream.buf, digits[r6], n) + n += 3 + } + writeBuf(stream.buf, digits[r5], n) + writeBuf(stream.buf, digits[r4], n+3) + writeBuf(stream.buf, digits[r3], n+6) + writeBuf(stream.buf, digits[r2], n+9) + writeBuf(stream.buf, digits[r1], n+12) + stream.n = n + 15 +} + +// WriteInt write int to stream +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/feature_stream_string.go b/vendor/github.com/json-iterator/go/feature_stream_string.go new file mode 100644 index 00000000..334282f0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream_string.go @@ -0,0 +1,396 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML