diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..da3ba441 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,7 @@ +# GitHub code owners +# See https://help.github.com/articles/about-codeowners/ +# +# KEEP THIS FILE SORTED. Order is important. Last match takes precedence. + +* @mrunalp @runcom +pkg/storage/** @nalind @runcom @rhatdan diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 00000000..296f83e6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,58 @@ + + +**Description** + + + +**Steps to reproduce the issue:** +1. +2. +3. + +**Describe the results you received:** + + +**Describe the results you expected:** + + +**Additional information you deem important (e.g. issue happens only occasionally):** + +**Output of `crio --version`:** + +``` +(paste your output here) +``` + +**Additional environment details (AWS, VirtualBox, physical, etc.):** diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..d25e940f --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,23 @@ + + +**- What I did** + +**- How I did it** + +**- How to verify it** + +**- Description for the changelog** + diff --git a/.gitignore b/.gitignore index f9c8e7d9..8217e6ba 100644 --- a/.gitignore +++ b/.gitignore @@ -1,17 +1,18 @@ /.artifacts/ /_output/ -/conmon/conmon /conmon/conmon.o /docs/*.[158] /docs/*.[158].gz -/kpod -/crioctl -/crio /crio.conf *.o *.orig -/pause/pause /pause/pause.o +/bin/ /test/bin2img/bin2img /test/checkseccomp/checkseccomp /test/copyimg/copyimg + +Vagrantfile +.vagrant/ + +.vscode/ diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000..29d8c860 --- /dev/null +++ b/.mailmap @@ -0,0 +1,10 @@ +Aleksa Sarai +Antonio Murdaca +CuiHaozhi +Daniel J Walsh +Haiyan Meng +Lorenzo Fontana +Mrunal Patel +Mrunal Patel +Pengfei Ni +Tobias Klauser diff --git a/.travis.yml b/.travis.yml index be326c1a..3e1047b2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,8 +47,6 @@ jobs: go: 1.9.x - script: - make .gitvalidation - - make gofmt - - make lint - make testunit - make docs - make diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..cc549116 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,142 @@ +# Contributing to CRI-O + +We'd love to have you join the community! Below summarizes the processes +that we follow. + +## Topics + +* [Reporting Issues](#reporting-issues) +* [Submitting Pull Requests](#submitting-pull-requests) +* [Communications](#communications) +* [Becoming a Maintainer](#becoming-a-maintainer) + +## Reporting Issues + +Before reporting an issue, check our backlog of +[open issues](https://github.com/kubernetes-incubator/cri-o/issues) +to see if someone else has already reported it. If so, feel free to add +your scenario, or additional information, to the discussion. Or simply +"subscribe" to it to be notified when it is updated. + +If you find a new issue with the project we'd love to hear about it! The most +important aspect of a bug report is that it includes enough information for +us to reproduce it. So, please include as much detail as possible and try +to remove the extra stuff that doesn't really relate to the issue itself. +The easier it is for us to reproduce it, the faster it'll be fixed! + +Please don't include any private/sensitive information in your issue! + +## Submitting Pull Requests + +No Pull Request (PR) is too small! 
Typos, additional comments in the code,
+new testcases, bug fixes, new features, more documentation, ... it's all
+welcome!
+
+While bug fixes can first be identified via an "issue", that is not required.
+It's ok to just open up a PR with the fix, but make sure you include the same
+information you would have included in an issue - like how to reproduce it.
+
+PRs for new features should include some background on what use cases the
+new code is trying to address. When possible and when it makes sense, try to
+break up larger PRs into smaller ones - smaller code changes are easier to
+review. But only do so if those smaller ones make sense as stand-alone PRs.
+
+Regardless of the type of PR, all PRs should include:
+* well documented code changes
+* additional testcases. Ideally, they should fail without your code change applied
+* documentation changes
+
+Squash your commits into logical pieces of work that might want to be reviewed
+separately from the rest of the PR. But squashing down to just one commit is ok
+too, since the entire PR will be reviewed in the end anyway. When in doubt,
+squash.
+
+PRs that fix issues should include a reference like `Closes #XXXX` in the
+commit message so that GitHub will automatically close the referenced issue
+when the PR is merged.
+
+### Sign your PRs
+
+The sign-off is a line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s` (a minimal sketch of such a trailer
+check appears below).
+
+## Communications
+
+For general questions or discussions, please use the `cri-o` IRC channel on
+`irc.freenode.net`.
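As a companion to the sign-off requirement above: CI for this repository validates commit metadata via `make .gitvalidation` (see the `.travis.yml` change earlier in this diff). The snippet below is a hypothetical, minimal version of such a trailer check in Go — only the `Signed-off-by:` format comes from this document; the function name and regexp are illustrative, not CRI-O's actual tooling.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// signOffRe matches a well-formed DCO trailer, e.g.
// "Signed-off-by: Joe Smith <joe.smith@email.com>".
var signOffRe = regexp.MustCompile(`^Signed-off-by: .+ <.+@.+>$`)

// hasSignOff reports whether any line of the commit message is a
// well-formed Signed-off-by trailer.
func hasSignOff(commitMsg string) bool {
	for _, line := range strings.Split(commitMsg, "\n") {
		if signOffRe.MatchString(strings.TrimSpace(line)) {
			return true
		}
	}
	return false
}

func main() {
	msg := "fix: handle empty config\n\nSigned-off-by: Joe Smith <joe.smith@email.com>"
	fmt.Println(hasSignOff(msg)) // true
}
```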
+ +For discussions around issues/bugs and features, you can use the github +[issues](https://github.com/kubernetes-incubator/cri-o/issues) +and +[PRs](https://github.com/kubernetes-incubator/cri-o/pulls) +tracking system. + + diff --git a/Dockerfile b/Dockerfile index 3caf5ac5..1fb5e569 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,6 +38,7 @@ RUN apt-get update && apt-get install -y \ netcat \ socat \ --no-install-recommends \ + bsdmainutils \ && apt-get clean # install bats @@ -56,7 +57,7 @@ RUN mkdir -p /usr/src/criu \ && rm -rf /usr/src/criu # Install runc -ENV RUNC_COMMIT 84a082bfef6f932de921437815355186db37aeb1 +ENV RUNC_COMMIT c6e4a1ebeb1a72b529c6f1b6ee2b1ae5b868b14f RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ @@ -64,7 +65,7 @@ RUN set -x \ && git fetch origin --tags \ && git checkout -q "$RUNC_COMMIT" \ && make static BUILDTAGS="seccomp selinux" \ - && cp runc /usr/local/bin/runc \ + && cp runc /usr/bin/runc \ && rm -rf "$GOPATH" # Install CNI plugins @@ -97,7 +98,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install crictl -ENV CRICTL_COMMIT 16e6fe4d7199c5689db4630a9330e6a8a12cecd1 +ENV CRICTL_COMMIT b42fc3f364dd48f649d55926c34492beeb9b2e99 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/kubernetes-incubator/cri-tools.git "$GOPATH/src/github.com/kubernetes-incubator/cri-tools" \ diff --git a/KPOD_VERSION b/KPOD_VERSION deleted file mode 100644 index ceab6e11..00000000 --- a/KPOD_VERSION +++ /dev/null @@ -1 +0,0 @@ -0.1 \ No newline at end of file diff --git a/Makefile b/Makefile index ef6129eb..7cc1a4c1 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,9 @@ LIBEXECDIR ?= ${PREFIX}/libexec MANDIR ?= ${PREFIX}/share/man ETCDIR ?= ${DESTDIR}/etc ETCDIR_CRIO ?= ${ETCDIR}/crio -BUILDTAGS ?= selinux seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_tag.sh) $(shell hack/btrfs_installed_tag.sh) +BUILDTAGS ?= seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_installed.sh) $(shell hack/libdm_no_deferred_remove_tag.sh) $(shell hack/btrfs_installed_tag.sh) $(shell hack/ostree_tag.sh) $(shell hack/selinux_tag.sh) +CRICTL_CONFIG_DIR=${DESTDIR}/etc + BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions OCIUMOUNTINSTALLDIR=$(PREFIX)/share/oci-umount/oci-umount.d @@ -22,9 +24,6 @@ COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true) GIT_COMMIT := $(if $(shell git status --porcelain --untracked-files=no),"${COMMIT_NO}-dirty","${COMMIT_NO}") BUILD_INFO := $(shell date +%s) -VERSION := ${shell cat ./VERSION} -KPOD_VERSION := ${shell cat ./KPOD_VERSION} - # If GOPATH not specified, use one in the local directory ifeq ($(GOPATH),) export GOPATH := $(CURDIR)/_output @@ -35,8 +34,9 @@ GOPKGBASEDIR := $(shell dirname "$(GOPKGDIR)") # Update VPATH so make finds .gopathok VPATH := $(VPATH):$(GOPATH) - -LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} -X main.buildInfo=${BUILD_INFO} -X main.version=${VERSION} -X main.kpodVersion=${KPOD_VERSION}' +SHRINKFLAGS := -s -w +BASE_LDFLAGS := ${SHRINKFLAGS} -X main.gitCommit=${GIT_COMMIT} -X main.buildInfo=${BUILD_INFO} +LDFLAGS := -ldflags '${BASE_LDFLAGS}' all: binaries crio.conf docs @@ -46,7 +46,7 @@ help: @echo "Usage: make " @echo @echo " * 'install' - Install binaries to system locations" - @echo " * 'binaries' - Build crio, conmon and crioctl" + @echo " * 'binaries' - Build crio, conmon and pause" @echo " * 'integration' - Execute integration tests" @echo " * 'clean' - Clean artifacts" @echo " * 'lint' 
- Execute the source code linter" @@ -64,7 +64,8 @@ lint: .gopathok @./.tool/lint gofmt: - @./hack/verify-gofmt.sh + find . -name '*.go' ! -path './vendor/*' -exec gofmt -s -w {} \+ + git diff --exit-code conmon: $(MAKE) -C $@ @@ -73,36 +74,30 @@ pause: $(MAKE) -C $@ test/bin2img/bin2img: .gopathok $(wildcard test/bin2img/*.go) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/test/bin2img + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/bin2img test/copyimg/copyimg: .gopathok $(wildcard test/copyimg/*.go) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/test/copyimg + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/copyimg test/checkseccomp/checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/test/checkseccomp + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/checkseccomp crio: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/crio $(PROJECT)) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/cmd/crio - -crioctl: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/crioctl $(PROJECT)) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/cmd/crioctl - -kpod: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/kpod $(PROJECT)) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/cmd/kpod + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o bin/$@ $(PROJECT)/cmd/crio crio.conf: crio - ./crio --config="" config --default > crio.conf + ./bin/crio --config="" config --default > crio.conf clean: ifneq ($(GOPATH),) rm -f "$(GOPATH)/.gopathok" endif rm -rf _output - rm -f docs/*.1 docs/*.5 docs/*.8 + rm -f docs/*.5 docs/*.8 rm -fr test/testdata/redis-image find . -name \*~ -delete find . -name \#\* -delete - rm -f crioctl crio kpod + rm -f bin/crio make -C conmon clean make -C pause clean rm -f test/bin2img/bin2img @@ -113,25 +108,23 @@ crioimage: docker build -t ${CRIO_IMAGE} . 
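A note on the linker flags introduced above: `SHRINKFLAGS := -s -w` strips the symbol and DWARF tables to shrink the binaries, and each `-X` pair (`-X main.gitCommit=...`, `-X main.buildInfo=...`) overwrites a package-level string variable at link time. A minimal sketch of the `-X` mechanism — a toy `main` package, not CRI-O's layout, though it mirrors the `var gitCommit = ""` declaration that `cmd/crio/main.go` expects the Makefile to fill in:

```go
package main

import "fmt"

// gitCommit is intentionally empty in source; the linker injects the
// real value, e.g.:
//
//	go build -ldflags "-s -w -X main.gitCommit=$(git rev-parse HEAD)"
var gitCommit = ""

func main() {
	if gitCommit == "" {
		fmt.Println("built without commit metadata")
		return
	}
	fmt.Println("commit:", gitCommit)
}
```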
dbuild: crioimage - docker run --name=${CRIO_INSTANCE} --privileged ${CRIO_IMAGE} -v ${PWD}:/go/src/${PROJECT} --rm make binaries + docker run --name=${CRIO_INSTANCE} -e BUILDTAGS --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${CRIO_IMAGE} make binaries integration: crioimage - docker run -e STORAGE_OPTS="--storage-driver=vfs" -e TESTFLAGS -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${CRIO_IMAGE} make localintegration + docker run -e STORAGE_OPTIONS="--storage-driver=vfs" -e TESTFLAGS -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${CRIO_IMAGE} make localintegration testunit: $(GO) test -tags "$(BUILDTAGS)" -cover $(PACKAGES) -localintegration: clean binaries +localintegration: clean binaries test-binaries ./test/test_runner.sh ${TESTFLAGS} -binaries: crio crioctl kpod conmon pause test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp +binaries: crio conmon pause +test-binaries: test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp MANPAGES_MD := $(wildcard docs/*.md) MANPAGES := $(MANPAGES_MD:%.md=%) -docs/%.1: docs/%.1.md .gopathok - (go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@) || ($(GOPATH)/bin/go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@) - docs/%.5: docs/%.5.md .gopathok (go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@) || ($(GOPATH)/bin/go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@) @@ -143,28 +136,24 @@ docs: $(MANPAGES) install: .gopathok install.bin install.man install.bin: - install ${SELINUXOPT} -D -m 755 crio $(BINDIR)/crio - install ${SELINUXOPT} -D -m 755 crioctl $(BINDIR)/crioctl - install ${SELINUXOPT} -D -m 755 kpod $(BINDIR)/kpod - install ${SELINUXOPT} -D -m 755 conmon/conmon $(LIBEXECDIR)/crio/conmon - install ${SELINUXOPT} -D -m 755 pause/pause $(LIBEXECDIR)/crio/pause + install ${SELINUXOPT} -D -m 755 bin/crio $(BINDIR)/crio + install ${SELINUXOPT} -D -m 755 bin/conmon $(LIBEXECDIR)/crio/conmon + install ${SELINUXOPT} -D -m 755 bin/pause $(LIBEXECDIR)/crio/pause install.man: - install ${SELINUXOPT} -d -m 755 $(MANDIR)/man1 install ${SELINUXOPT} -d -m 755 $(MANDIR)/man5 install ${SELINUXOPT} -d -m 755 $(MANDIR)/man8 - install ${SELINUXOPT} -m 644 $(filter %.1,$(MANPAGES)) -t $(MANDIR)/man1 install ${SELINUXOPT} -m 644 $(filter %.5,$(MANPAGES)) -t $(MANDIR)/man5 install ${SELINUXOPT} -m 644 $(filter %.8,$(MANPAGES)) -t $(MANDIR)/man8 -install.config: +install.config: crio.conf install ${SELINUXOPT} -D -m 644 crio.conf $(ETCDIR_CRIO)/crio.conf install ${SELINUXOPT} -D -m 644 seccomp.json $(ETCDIR_CRIO)/seccomp.json install ${SELINUXOPT} -D -m 644 crio-umount.conf $(OCIUMOUNTINSTALLDIR)/crio-umount.conf + install ${SELINUXOPT} -D -m 644 crictl.yaml $(CRICTL_CONFIG_DIR) install.completions: install ${SELINUXOPT} -d -m 755 ${BASHINSTALLDIR} - install ${SELINUXOPT} -m 644 -D completions/bash/kpod ${BASHINSTALLDIR} install.systemd: install ${SELINUXOPT} -D -m 644 contrib/systemd/crio.service $(PREFIX)/lib/systemd/system/crio.service @@ -173,7 +162,6 @@ install.systemd: uninstall: rm -f $(BINDIR)/crio - rm -f $(BINDIR)/crioctl rm -f $(LIBEXECDIR)/crio/conmon rm -f $(LIBEXECDIR)/crio/pause for i in $(filter %.1,$(MANPAGES)); do \ diff --git a/OWNERS b/OWNERS index 7696ec17..6b945bba 100644 --- a/OWNERS +++ b/OWNERS @@ -1,4 +1,4 @@ -assignees: +approvers: - mrunalp - runcom - cyphar diff --git a/README.md b/README.md index d1cddd1f..69feaa34 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,32 @@ -![cri-o 
logo](https://cdn.rawgit.com/kubernetes-incubator/cri-o/master/logo/crio-logo.svg)
-# cri-o - OCI-based implementation of Kubernetes Container Runtime Interface
+![CRI-O logo](https://cdn.rawgit.com/kubernetes-incubator/cri-o/master/logo/crio-logo.svg)
+# CRI-O - OCI-based implementation of Kubernetes Container Runtime Interface
 [![Build Status](https://img.shields.io/travis/kubernetes-incubator/cri-o.svg?maxAge=2592000&style=flat-square)](https://travis-ci.org/kubernetes-incubator/cri-o)
 [![Go Report Card](https://goreportcard.com/badge/github.com/kubernetes-incubator/cri-o?style=flat-square)](https://goreportcard.com/report/github.com/kubernetes-incubator/cri-o)
 
-### Status: Release Candidate 3
+### Status: Stable
+
+## Compatibility matrix: CRI-O <-> Kubernetes clusters
+
+| Version - Branch          | Kubernetes branch/version     | Maintenance status |
+|---------------------------|-------------------------------|--------------------|
+| CRI-O 1.0.x - release-1.0 | Kubernetes 1.7 branch, v1.7.x | =                  |
+| CRI-O 1.8.x - release-1.8 | Kubernetes 1.8 branch, v1.8.x | =                  |
+| CRI-O 1.9.x - release-1.9 | Kubernetes 1.9 branch, v1.9.x | =                  |
+| CRI-O HEAD - master       | Kubernetes master branch      | ✓                  |
+
+Key:
+
+* `✓` CRI changes in the main Kubernetes repo are actively implemented in CRI-O
+* `=` Maintenance is manual; only bugs will be patched.
 
 ## What is the scope of this project?
 
-cri-o is meant to provide an integration path between OCI conformant runtimes and the kubelet.
+CRI-O is meant to provide an integration path between OCI conformant runtimes and the kubelet.
 Specifically, it implements the Kubelet [Container Runtime Interface (CRI)](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md) using OCI conformant runtimes.
-The scope of cri-o is tied to the scope of the CRI.
+The scope of CRI-O is tied to the scope of the CRI.
 
-At a high level, we expect the scope of cri-o to be restricted to the following functionalities:
+At a high level, we expect the scope of CRI-O to be restricted to the following functionalities:
 
 * Support multiple image formats including the existing Docker image format
 * Support for multiple means to download images including trust & image verification
@@ -24,7 +38,7 @@ At a high level, we expect the scope of cri-o to be restricted to the following
 ## What is not in scope for this project?
 
 * Building, signing and pushing images to various image storages
-* A CLI utility for interacting with cri-o. Any CLIs built as part of this project are only meant for testing this project and there will be no guarantees on the backward compatibility with it.
+* A CLI utility for interacting with CRI-O. Any CLIs built as part of this project are only meant for testing it, and there are no guarantees of backward compatibility.
 
 This is an implementation of the Kubernetes Container Runtime Interface (CRI) that will allow Kubernetes to directly launch and manage Open Container Initiative (OCI) containers.
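Since standalone CLIs are explicitly out of scope (this diff deletes `crioctl`, and `crictl` from cri-tools takes its place), programmatic interaction with CRI-O happens over the CRI gRPC API on its UNIX socket. The sketch below reuses the dial pattern of the `getClientConnection` helper removed later in this diff and calls the simplest CRI method; the socket path and the CRI v1alpha1 import are the ones this diff uses, but treat it as an illustration rather than a supported client.

```go
package main

import (
	"fmt"
	"net"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

func main() {
	// Dial the CRI-O socket; same pattern as the removed crioctl helper.
	conn, err := grpc.Dial("/var/run/crio/crio.sock",
		grpc.WithInsecure(),
		grpc.WithTimeout(10*time.Second),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}))
	if err != nil {
		fmt.Println("failed to connect:", err)
		return
	}
	defer conn.Close()

	// Version is the simplest CRI call (crioctl exposed it as
	// `crioctl runtimeversion`).
	client := pb.NewRuntimeServiceClient(conn)
	r, err := client.Version(context.Background(), &pb.VersionRequest{})
	if err != nil {
		fmt.Println("version request failed:", err)
		return
	}
	fmt.Printf("%s %s (CRI %s)\n", r.RuntimeName, r.RuntimeVersion, r.RuntimeApiVersion)
}
```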
@@ -40,36 +54,8 @@ It is currently in active development in the Kubernetes community through the [d | Command | Description | Demo| | ---------------------------------------------------- | --------------------------------------------------------------------------|-----| | [crio(8)](/docs/crio.8.md) | OCI Kubernetes Container Runtime daemon || -| [kpod(1)](/docs/kpod.1.md) | Simple management tool for pods and images || -| [kpod-attach(1)](/docs/kpod-attach.1.md) | Instead of providing a `kpod attach` command, the man page `kpod-attach` describes how to use the `kpod logs` and `kpod exec` commands to achieve the same goals as `kpod attach`.|| -| [kpod-cp(1)](/docs/kpod-cp.1.md) | Instead of providing a `kpod cp` command, the man page `kpod-cp` describes how to use the `kpod mount` command to have even more flexibility and functionality.|| -| [kpod-diff(1)](/docs/kpod-diff.1.md) | Inspect changes on a container or image's filesystem || -| [kpod-export(1)](/docs/kpod-export.1.md) | Export container's filesystem contents as a tar archive |[![...](/docs/play.png)](https://asciinema.org/a/913lBIRAg5hK8asyIhhkQVLtV)| -| [kpod-history(1)](/docs/kpod-history.1.md) | Shows the history of an image |[![...](/docs/play.png)](https://asciinema.org/a/bCvUQJ6DkxInMELZdc5DinNSx)| -| [kpod-images(1)](/docs/kpod-images.1.md) | List images in local storage |[![...](/docs/play.png)](https://asciinema.org/a/133649)| -| [kpod-info(1)](/docs/kpod-info.1.md) | Display system information || -| [kpod-inspect(1)](/docs/kpod-inspect.1.md) | Display the configuration of a container or image |[![...](/docs/play.png)](https://asciinema.org/a/133418)| -| [kpod-kill(1)](/docs/kpod-kill.1.md) | Kill the main process in one or more running containers |[![...](/docs/play.png)](https://asciinema.org/a/3jNos0A5yzO4hChu7ddKkUPw7)| -| [kpod-load(1)](/docs/kpod-load.1.md) | Load an image from docker archive or oci |[![...](/docs/play.png)](https://asciinema.org/a/kp8kOaexEhEa20P1KLZ3L5X4g)| -| [kpod-login(1)](/docs/kpod-login.1.md) | Login to a container registry || -| [kpod-logout(1)](/docs/kpod-logout.1.md) | Logout of a container registry || -| [kpod-logs(1)](/docs/kpod-logs.1.md) | Display the logs of a container || -| [kpod-mount(1)](/docs/kpod-mount.1.md) | Mount a working container's root filesystem || -| [kpod-pause(1)](/docs/kpod-pause.1.md) | Pause one or more running containers |[![...](/docs/play.png)](https://asciinema.org/a/141292)| -| [kpod-ps(1)](/docs/kpod-ps.1.md) | Prints out information about containers |[![...](/docs/play.png)](https://asciinema.org/a/bbT41kac6CwZ5giESmZLIaTLR)| -| [kpod-pull(1)](/docs/kpod-pull.1.md) | Pull an image from a registry |[![...](/docs/play.png)](https://asciinema.org/a/lr4zfoynHJOUNu1KaXa1dwG2X)| -| [kpod-push(1)](/docs/kpod-push.1.md) | Push an image to a specified destination |[![...](/docs/play.png)](https://asciinema.org/a/133276)| -| [kpod-rename(1)](/docs/kpod-rename.1.md) | Rename a container || -| [kpod-rm(1)](/docs/kpod-rm.1.md) | Removes one or more containers |[![...](/docs/play.png)](https://asciinema.org/a/7EMk22WrfGtKWmgHJX9Nze1Qp)| -| [kpod-rmi(1)](/docs/kpod-rmi.1.md) | Removes one or more images |[![...](/docs/play.png)](https://asciinema.org/a/133799)| -| [kpod-save(1)](/docs/kpod-save.1.md) | Saves an image to an archive |[![...](/docs/play.png)](https://asciinema.org/a/kp8kOaexEhEa20P1KLZ3L5X4g)| -| [kpod-stats(1)](/docs/kpod-stats.1.md) | Display a live stream of one or more containers' resource usage statistics|| -| [kpod-stop(1)](/docs/kpod-stop.1.md) | 
Stops one or more running containers || -| [kpod-tag(1)](/docs/kpod-tag.1.md) | Add an additional name to a local image |[![...](/docs/play.png)](https://asciinema.org/a/133803)| -| [kpod-umount(1)](/docs/kpod-umount.1.md) | Unmount a working container's root filesystem || -| [kpod-unpause(1)](/docs/kpod-unpause.1.md) | Unpause one or more running containers |[![...](/docs/play.png)](https://asciinema.org/a/141292)| -| [kpod-version(1)](/docs/kpod-version.1.md) | Display the version information |[![...](/docs/play.png)](https://asciinema.org/a/mfrn61pjZT9Fc8L4NbfdSqfgu)| -| [kpod-wait(1)](/docs/kpod-wait.1.md) | Wait on one or more containers to stop and print their exit codes|| + +Note that kpod and its container management and debugging commands have moved to a separate repository, located [here](https://github.com/projectatomic/libpod). ## Configuration | File | Description | @@ -80,21 +66,26 @@ It is currently in active development in the Kubernetes community through the [d [CRI-O configures OCI Hooks to run when launching a container](./hooks.md) -## cri-o Usage Transfer +## CRI-O Usage Transfer -[Useful information for ops and dev transfer as it relates to infrastructure that utilizes cri-o](/transfer.md) +[Useful information for ops and dev transfer as it relates to infrastructure that utilizes CRI-O](/transfer.md) ## Communication For async communication and long running discussions please use issues and pull requests on the github repo. This will be the best place to discuss design and implementation. -For sync communication we have an IRC channel #cri-o, on chat.freenode.net, that everyone is welcome to join and chat about development. +For sync communication we have an IRC channel #CRI-O, on chat.freenode.net, that everyone is welcome to join and chat about development. ## Getting started -### Prerequisites +### Runtime dependencies -Latest version of `runc` is expected to be installed on the system. It is picked up as the default runtime by crio. +- runc, Clear Containers runtime, or any other OCI compatible runtime +- socat +- iproute +- iptables + +Latest version of `runc` is expected to be installed on the system. It is picked up as the default runtime by CRI-O. ### Build and Run Dependencies @@ -111,6 +102,7 @@ yum install -y \ glibc-devel \ glibc-static \ go \ + golang-github-cpuguy83-go-md2man \ gpgme-devel \ libassuan-devel \ libgpg-error-devel \ @@ -138,6 +130,7 @@ apt-get install -y \ libseccomp-dev \ libselinux1-dev \ pkg-config \ + go-md2man \ runc \ skopeo-containers ``` @@ -165,7 +158,7 @@ apt-get install -y \ ### Get Source Code -As with other Go projects, cri-o must be cloned into a directory structure like: +As with other Go projects, CRI-O must be cloned into a directory structure like: ``` GOPATH @@ -199,7 +192,7 @@ make sudo make install ``` -Otherwise, if you do not want to build `cri-o` with seccomp support you can add `BUILDTAGS=""` when running make. +Otherwise, if you do not want to build `CRI-O` with seccomp support you can add `BUILDTAGS=""` when running make. ```bash make BUILDTAGS="" @@ -208,7 +201,7 @@ sudo make install #### Build Tags -`cri-o` supports optional build tags for compiling support of various features. +`CRI-O` supports optional build tags for compiling support of various features. To add build tags to the make option the `BUILDTAGS` variable must be set. ```bash @@ -234,15 +227,15 @@ your system. 
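The `BUILDTAGS` machinery documented in the Build Tags section above rests on Go build constraints: the Makefile passes the variable straight through as `go build -tags "$(BUILDTAGS)"`, and each tag includes or excludes whole files at compile time. A hypothetical two-file illustration (not from the CRI-O tree) of how a `seccomp` tag can toggle an implementation:

```go
// seccomp_supported.go
// Compiled only when built with `go build -tags seccomp`,
// which is what `make BUILDTAGS="seccomp"` passes through.

// +build seccomp

package main

func seccompSupported() bool { return true }
```

```go
// seccomp_unsupported.go
// Fallback compiled when the seccomp tag is absent.

// +build !seccomp

package main

func seccompSupported() bool { return false }
```

Exactly one of the two files is compiled into any given build, so callers can use `seccompSupported()` unconditionally.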
### Running with kubernetes -You can run a local version of kubernetes with cri-o using `local-up-cluster.sh`: +You can run a local version of kubernetes with CRI-O using `local-up-cluster.sh`: 1. Clone the [kubernetes repository](https://github.com/kubernetes/kubernetes) -1. Start the cri-o daemon (`crio`) +1. Start the CRI-O daemon (`crio`) 1. From the kubernetes project directory, run: ```shell CGROUP_DRIVER=systemd \ CONTAINER_RUNTIME=remote \ -CONTAINER_RUNTIME_ENDPOINT='/var/run/crio.sock --runtime-request-timeout=15m' \ +CONTAINER_RUNTIME_ENDPOINT='/var/run/crio/crio.sock --runtime-request-timeout=15m' \ ./hack/local-up-cluster.sh ``` @@ -256,5 +249,4 @@ To run a full cluster, see [the instructions](kubernetes.md). 1. Support for log management, networking integration using CNI, pluggable image/storage management (done) 1. Support for exec/attach (done) 1. Target fully automated kubernetes testing without failures [e2e status](https://github.com/kubernetes-incubator/cri-o/issues/533) -1. Release 1.0 1. Track upstream k8s releases diff --git a/VERSION b/VERSION deleted file mode 100644 index 1e78071b..00000000 --- a/VERSION +++ /dev/null @@ -1 +0,0 @@ -1.0.0-rc4-dev diff --git a/cmd/crio/config.go b/cmd/crio/config.go index 855bd466..2564baf1 100644 --- a/cmd/crio/config.go +++ b/cmd/crio/config.go @@ -28,8 +28,7 @@ storage_driver = "{{ .Storage }}" storage_option = [ {{ range $opt := .StorageOptions }}{{ printf "\t%q,\n" $opt }}{{ end }}] -# The "crio.api" table contains settings for the kubelet/gRPC -# interface (which is also used by crioctl). +# The "crio.api" table contains settings for the kubelet/gRPC interface. [crio.api] # listen is the path to the AF_LOCAL socket on which crio will listen. @@ -108,9 +107,16 @@ cgroup_manager = "{{ .CgroupManager }}" # hooks_dir_path is the oci hooks directory for automatically executed hooks hooks_dir_path = "{{ .HooksDirPath }}" +# default_mounts is the mounts list to be mounted for the container when created +default_mounts = [ +{{ range $mount := .DefaultMounts }}{{ printf "\t%q, \n" $mount }}{{ end }}] + # pids_limit is the number of processes allowed in a container pids_limit = {{ .PidsLimit }} +# enable using a shared PID namespace for containers in a pod +enable_shared_pid_namespace = {{ .EnableSharedPIDNamespace }} + # log_size_max is the max limit for the container log size in bytes. # Negative values indicate that no limit is imposed. log_size_max = {{ .LogSizeMax }} diff --git a/cmd/crio/main.go b/cmd/crio/main.go index 93044e88..a058f296 100644 --- a/cmd/crio/main.go +++ b/cmd/crio/main.go @@ -8,12 +8,15 @@ import ( _ "net/http/pprof" "os" "os/signal" + "path/filepath" "sort" "strings" + "time" "github.com/containers/storage/pkg/reexec" - "github.com/kubernetes-incubator/cri-o/libkpod" + "github.com/kubernetes-incubator/cri-o/lib" "github.com/kubernetes-incubator/cri-o/server" + "github.com/kubernetes-incubator/cri-o/version" "github.com/opencontainers/selinux/go-selinux" "github.com/sirupsen/logrus" "github.com/soheilhy/cmux" @@ -23,19 +26,15 @@ import ( "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) -// This is populated by the Makefile from the VERSION file -// in the repository -var version = "" - // gitCommit is the commit that the binary is being built from. // It will be populated by the Makefile. 
var gitCommit = "" func validateConfig(config *server.Config) error { switch config.ImageVolumes { - case libkpod.ImageVolumesMkdir: - case libkpod.ImageVolumesIgnore: - case libkpod.ImageVolumesBind: + case lib.ImageVolumesMkdir: + case lib.ImageVolumesIgnore: + case lib.ImageVolumesBind: default: return fmt.Errorf("Unrecognized image volume type specified") @@ -127,9 +126,15 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error { if ctx.GlobalIsSet("hooks-dir-path") { config.HooksDirPath = ctx.GlobalString("hooks-dir-path") } + if ctx.GlobalIsSet("default-mounts") { + config.DefaultMounts = ctx.GlobalStringSlice("default-mounts") + } if ctx.GlobalIsSet("pids-limit") { config.PidsLimit = ctx.GlobalInt64("pids-limit") } + if ctx.GlobalIsSet("enable-shared-pid-namespace") { + config.EnableSharedPIDNamespace = ctx.GlobalBool("enable-shared-pid-namespace") + } if ctx.GlobalIsSet("log-size-max") { config.LogSizeMax = ctx.GlobalInt64("log-size-max") } @@ -140,7 +145,7 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error { config.PluginDir = ctx.GlobalString("cni-plugin-dir") } if ctx.GlobalIsSet("image-volumes") { - config.ImageVolumes = libkpod.ImageVolumesType(ctx.GlobalString("image-volumes")) + config.ImageVolumes = lib.ImageVolumesType(ctx.GlobalString("image-volumes")) } return nil } @@ -161,8 +166,7 @@ func catchShutdown(gserver *grpc.Server, sserver *server.Server, hserver *http.S *signalled = true gserver.GracefulStop() hserver.Shutdown(context.Background()) - // TODO(runcom): enable this after https://github.com/kubernetes/kubernetes/pull/51377 - //sserver.StopStreamServer() + sserver.StopStreamServer() sserver.StopExitMonitor() if err := sserver.Shutdown(); err != nil { logrus.Warnf("error shutting down main service %v", err) @@ -179,9 +183,7 @@ func main() { app := cli.NewApp() var v []string - if version != "" { - v = append(v, version) - } + v = append(v, version.Version) if gitCommit != "" { v = append(v, fmt.Sprintf("commit: %s", gitCommit)) } @@ -295,12 +297,16 @@ func main() { }, cli.Int64Flag{ Name: "pids-limit", - Value: libkpod.DefaultPidsLimit, + Value: lib.DefaultPidsLimit, Usage: "maximum number of processes allowed in a container", }, + cli.BoolFlag{ + Name: "enable-shared-pid-namespace", + Usage: "enable using a shared PID namespace for containers in a pod", + }, cli.Int64Flag{ Name: "log-size-max", - Value: libkpod.DefaultLogSizeMax, + Value: lib.DefaultLogSizeMax, Usage: "maximum log size in bytes for a container", }, cli.StringFlag{ @@ -313,13 +319,18 @@ func main() { }, cli.StringFlag{ Name: "image-volumes", - Value: string(libkpod.ImageVolumesMkdir), + Value: string(lib.ImageVolumesMkdir), Usage: "image volume handling ('mkdir', 'bind', or 'ignore')", }, cli.StringFlag{ Name: "hooks-dir-path", Usage: "set the OCI hooks directory path", - Value: libkpod.DefaultHooksDirPath, + Value: lib.DefaultHooksDirPath, + Hidden: true, + }, + cli.StringSliceFlag{ + Name: "default-mounts", + Usage: "add one or more default mount paths in the form host:container", Hidden: true, }, cli.BoolFlag{ @@ -405,6 +416,16 @@ func main() { }() } + args := c.Args() + if len(args) > 0 { + for _, command := range app.Commands { + if args[0] == command.Name { + break + } + } + return fmt.Errorf("command %q not supported", args[0]) + } + config := c.App.Metadata["config"].(*server.Config) if !config.SELinux { @@ -416,6 +437,10 @@ func main() { return fmt.Errorf("invalid --runtime value %q", err) } + if err := os.MkdirAll(filepath.Dir(config.Listen), 0755); err != nil { 
+ return err + } + // Remove the socket if it already exists if _, err := os.Stat(config.Listen); err == nil { if err := os.Remove(config.Listen); err != nil { @@ -467,7 +492,8 @@ func main() { infoMux := service.GetInfoMux() srv := &http.Server{ - Handler: infoMux, + Handler: infoMux, + ReadTimeout: 5 * time.Second, } graceful := false @@ -483,26 +509,23 @@ func main() { if graceful && strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") { err = nil } else { - logrus.Errorf("Failed to serve grpc grpc request: %v", err) + logrus.Errorf("Failed to serve grpc request: %v", err) } } }() - // TODO(runcom): enable this after https://github.com/kubernetes/kubernetes/pull/51377 - //streamServerCloseCh := service.StreamingServerCloseChan() + streamServerCloseCh := service.StreamingServerCloseChan() serverExitMonitorCh := service.ExitMonitorCloseChan() select { - // TODO(runcom): enable this after https://github.com/kubernetes/kubernetes/pull/51377 - //case <-streamServerCloseCh: + case <-streamServerCloseCh: case <-serverExitMonitorCh: case <-serverCloseCh: } service.Shutdown() - // TODO(runcom): enable this after https://github.com/kubernetes/kubernetes/pull/51377 - //<-streamServerCloseCh - //logrus.Debug("closed stream server") + <-streamServerCloseCh + logrus.Debug("closed stream server") <-serverExitMonitorCh logrus.Debug("closed exit monitor") <-serverCloseCh diff --git a/cmd/crioctl/container.go b/cmd/crioctl/container.go deleted file mode 100644 index 7be5a7d6..00000000 --- a/cmd/crioctl/container.go +++ /dev/null @@ -1,656 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "log" - "net/url" - "os" - "strings" - "time" - - "github.com/kubernetes-incubator/cri-o/client" - "github.com/urfave/cli" - "golang.org/x/net/context" - remocommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/remotecommand" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" -) - -var containerCommand = cli.Command{ - Name: "container", - Aliases: []string{"ctr"}, - Subcommands: []cli.Command{ - createContainerCommand, - inspectContainerCommand, - startContainerCommand, - stopContainerCommand, - removeContainerCommand, - containerStatusCommand, - listContainersCommand, - execSyncCommand, - execCommand, - }, -} - -type createOptions struct { - // configPath is path to the config for container - configPath string - // name sets the container name - name string - // podID of the container - podID string - // labels for the container - labels map[string]string -} - -var createContainerCommand = cli.Command{ - Name: "create", - Usage: "create a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "pod", - Usage: "the id of the pod sandbox to which the container belongs", - }, - cli.StringFlag{ - Name: "config", - Value: "config.json", - Usage: "the path of a container config file", - }, - cli.StringFlag{ - Name: "name", - Value: "", - Usage: "the name of the container", - }, - cli.StringSliceFlag{ - Name: "label", - Usage: "add key=value labels to the container", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. 
- conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - if !context.IsSet("pod") { - return fmt.Errorf("Please specify the id of the pod sandbox to which the container belongs via the --pod option") - } - - opts := createOptions{ - configPath: context.String("config"), - name: context.String("name"), - podID: context.String("pod"), - labels: make(map[string]string), - } - - for _, l := range context.StringSlice("label") { - pair := strings.Split(l, "=") - if len(pair) != 2 { - return fmt.Errorf("incorrectly specified label: %v", l) - } - opts.labels[pair[0]] = pair[1] - } - - // Test RuntimeServiceClient.CreateContainer - err = CreateContainer(client, opts) - if err != nil { - return fmt.Errorf("Creating container failed: %v", err) - } - return nil - }, -} - -var startContainerCommand = cli.Command{ - Name: "start", - Usage: "start a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = StartContainer(client, context.String("id")) - if err != nil { - return fmt.Errorf("Starting the container failed: %v", err) - } - return nil - }, -} - -var stopContainerCommand = cli.Command{ - Name: "stop", - Usage: "stop a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - cli.Int64Flag{ - Name: "timeout", - Value: 10, - Usage: "seconds to wait to kill the container after a graceful stop is requested", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = StopContainer(client, context.String("id"), context.Int64("timeout")) - if err != nil { - return fmt.Errorf("Stopping the container failed: %v", err) - } - return nil - }, -} - -var removeContainerCommand = cli.Command{ - Name: "remove", - Usage: "remove a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = RemoveContainer(client, context.String("id")) - if err != nil { - return fmt.Errorf("Removing the container failed: %v", err) - } - return nil - }, -} - -var containerStatusCommand = cli.Command{ - Name: "status", - Usage: "get the status of a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. 
- conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = ContainerStatus(client, context.String("id")) - if err != nil { - return fmt.Errorf("Getting the status of the container failed: %v", err) - } - return nil - }, -} - -var execSyncCommand = cli.Command{ - Name: "execsync", - Usage: "exec a command synchronously in a container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - cli.Int64Flag{ - Name: "timeout", - Value: 0, - Usage: "timeout for the command", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = ExecSync(client, context.String("id"), context.Args(), context.Int64("timeout")) - if err != nil { - return fmt.Errorf("execing command in container failed: %v", err) - } - return nil - }, -} - -var execCommand = cli.Command{ - Name: "exec", - Usage: "prepare a streaming endpoint to execute a command in the container", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - cli.BoolFlag{ - Name: "tty", - Usage: "whether to use tty", - }, - cli.BoolFlag{ - Name: "stdin", - Usage: "whether to stream to stdin", - }, - cli.BoolFlag{ - Name: "url", - Usage: "do not exec command, just prepare streaming endpoint", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = Exec(client, context.String("id"), context.Bool("tty"), context.Bool("stdin"), context.Bool("url"), context.Args()) - if err != nil { - return fmt.Errorf("execing command in container failed: %v", err) - } - return nil - }, -} - -type listOptions struct { - // id of the container - id string - // podID of the container - podID string - // state of the container - state string - // quiet is for listing just container IDs - quiet bool - // labels are selectors for the container - labels map[string]string -} - -var listContainersCommand = cli.Command{ - Name: "list", - Usage: "list containers", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "quiet", - Usage: "list only container IDs", - }, - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "filter by container id", - }, - cli.StringFlag{ - Name: "pod", - Value: "", - Usage: "filter by container pod id", - }, - cli.StringFlag{ - Name: "state", - Value: "", - Usage: "filter by container state", - }, - cli.StringSliceFlag{ - Name: "label", - Usage: "filter by key=value label", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. 
- conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - opts := listOptions{ - id: context.String("id"), - podID: context.String("pod"), - state: context.String("state"), - quiet: context.Bool("quiet"), - labels: make(map[string]string), - } - - for _, l := range context.StringSlice("label") { - pair := strings.Split(l, "=") - if len(pair) != 2 { - return fmt.Errorf("incorrectly specified label: %v", l) - } - opts.labels[pair[0]] = pair[1] - } - - err = ListContainers(client, opts) - if err != nil { - return fmt.Errorf("listing containers failed: %v", err) - } - return nil - }, -} - -// CreateContainer sends a CreateContainerRequest to the server, and parses -// the returned CreateContainerResponse. -func CreateContainer(client pb.RuntimeServiceClient, opts createOptions) error { - config, err := loadContainerConfig(opts.configPath) - if err != nil { - return err - } - - // Override the name by the one specified through CLI - if opts.name != "" { - config.Metadata.Name = opts.name - } - - for k, v := range opts.labels { - config.Labels[k] = v - } - - r, err := client.CreateContainer(context.Background(), &pb.CreateContainerRequest{ - PodSandboxId: opts.podID, - Config: config, - // TODO(runcom): this is missing PodSandboxConfig!!! - // we should/could find a way to retrieve it from the fs and set it here - }) - if err != nil { - return err - } - fmt.Println(r.ContainerId) - return nil -} - -// StartContainer sends a StartContainerRequest to the server, and parses -// the returned StartContainerResponse. -func StartContainer(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - _, err := client.StartContainer(context.Background(), &pb.StartContainerRequest{ - ContainerId: ID, - }) - if err != nil { - return err - } - fmt.Println(ID) - return nil -} - -// StopContainer sends a StopContainerRequest to the server, and parses -// the returned StopContainerResponse. -func StopContainer(client pb.RuntimeServiceClient, ID string, timeout int64) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - _, err := client.StopContainer(context.Background(), &pb.StopContainerRequest{ - ContainerId: ID, - Timeout: timeout, - }) - if err != nil { - return err - } - fmt.Println(ID) - return nil -} - -// RemoveContainer sends a RemoveContainerRequest to the server, and parses -// the returned RemoveContainerResponse. -func RemoveContainer(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - _, err := client.RemoveContainer(context.Background(), &pb.RemoveContainerRequest{ - ContainerId: ID, - }) - if err != nil { - return err - } - fmt.Println(ID) - return nil -} - -// ContainerStatus sends a ContainerStatusRequest to the server, and parses -// the returned ContainerStatusResponse. 
-func ContainerStatus(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - r, err := client.ContainerStatus(context.Background(), &pb.ContainerStatusRequest{ - ContainerId: ID}) - if err != nil { - return err - } - fmt.Printf("ID: %s\n", r.Status.Id) - if r.Status.Metadata != nil { - if r.Status.Metadata.Name != "" { - fmt.Printf("Name: %s\n", r.Status.Metadata.Name) - } - fmt.Printf("Attempt: %v\n", r.Status.Metadata.Attempt) - } - // TODO(mzylowski): print it prettier - fmt.Printf("Status: %s\n", r.Status.State) - ctm := time.Unix(0, r.Status.CreatedAt) - fmt.Printf("Created: %v\n", ctm) - stm := time.Unix(0, r.Status.StartedAt) - fmt.Printf("Started: %v\n", stm) - ftm := time.Unix(0, r.Status.FinishedAt) - fmt.Printf("Finished: %v\n", ftm) - fmt.Printf("Exit Code: %v\n", r.Status.ExitCode) - fmt.Printf("Reason: %v\n", r.Status.Reason) - if r.Status.Image != nil { - fmt.Printf("Image: %v\n", r.Status.Image.Image) - } - // - // TODO: https://github.com/kubernetes-incubator/cri-o/issues/531 - // - //fmt.Printf("ImageRef: %v\n", r.Status.ImageRef) - - return nil -} - -// ExecSync sends an ExecSyncRequest to the server, and parses -// the returned ExecSyncResponse. -func ExecSync(client pb.RuntimeServiceClient, ID string, cmd []string, timeout int64) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - r, err := client.ExecSync(context.Background(), &pb.ExecSyncRequest{ - ContainerId: ID, - Cmd: cmd, - Timeout: timeout, - }) - if err != nil { - return err - } - fmt.Println("Stdout:") - fmt.Println(string(r.Stdout)) - fmt.Println("Stderr:") - fmt.Println(string(r.Stderr)) - fmt.Printf("Exit code: %v\n", r.ExitCode) - - return nil -} - -// Exec sends an ExecRequest to the server, and parses -// the returned ExecResponse. -func Exec(client pb.RuntimeServiceClient, ID string, tty bool, stdin bool, urlOnly bool, cmd []string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - r, err := client.Exec(context.Background(), &pb.ExecRequest{ - ContainerId: ID, - Cmd: cmd, - Tty: tty, - Stdin: stdin, - }) - if err != nil { - return err - } - - if urlOnly { - fmt.Println("URL:") - fmt.Println(r.Url) - return nil - } - - execURL, err := url.Parse(r.Url) - if err != nil { - return err - } - - streamExec, err := remotecommand.NewExecutor(&restclient.Config{}, "GET", execURL) - if err != nil { - return err - } - - options := remotecommand.StreamOptions{ - SupportedProtocols: remocommandconsts.SupportedStreamingProtocols, - Stdout: os.Stdout, - Stderr: os.Stderr, - Tty: tty, - } - - if stdin { - options.Stdin = os.Stdin - } - - return streamExec.Stream(options) -} - -// ListContainers sends a ListContainerRequest to the server, and parses -// the returned ListContainerResponse. 
-func ListContainers(client pb.RuntimeServiceClient, opts listOptions) error { - filter := &pb.ContainerFilter{} - if opts.id != "" { - filter.Id = opts.id - } - if opts.podID != "" { - filter.PodSandboxId = opts.podID - } - if opts.state != "" { - st := &pb.ContainerStateValue{} - st.State = pb.ContainerState_CONTAINER_UNKNOWN - switch opts.state { - case "created": - st.State = pb.ContainerState_CONTAINER_CREATED - filter.State = st - case "running": - st.State = pb.ContainerState_CONTAINER_RUNNING - filter.State = st - case "stopped": - st.State = pb.ContainerState_CONTAINER_EXITED - filter.State = st - default: - log.Fatalf("--state should be one of created, running or stopped") - } - } - if opts.labels != nil { - filter.LabelSelector = opts.labels - } - r, err := client.ListContainers(context.Background(), &pb.ListContainersRequest{ - Filter: filter, - }) - if err != nil { - return err - } - for _, c := range r.GetContainers() { - if opts.quiet { - fmt.Println(c.Id) - continue - } - fmt.Printf("ID: %s\n", c.Id) - fmt.Printf("Pod: %s\n", c.PodSandboxId) - if c.Metadata != nil { - if c.Metadata.Name != "" { - fmt.Printf("Name: %s\n", c.Metadata.Name) - } - fmt.Printf("Attempt: %v\n", c.Metadata.Attempt) - } - fmt.Printf("Status: %s\n", c.State) - if c.Image != nil { - fmt.Printf("Image: %s\n", c.Image.Image) - } - ctm := time.Unix(0, c.CreatedAt) - fmt.Printf("Created: %v\n", ctm) - if c.Labels != nil { - fmt.Println("Labels:") - for _, k := range getSortedKeys(c.Labels) { - fmt.Printf("\t%s -> %s\n", k, c.Labels[k]) - } - } - if c.Annotations != nil { - fmt.Println("Annotations:") - for _, k := range getSortedKeys(c.Annotations) { - fmt.Printf("\t%s -> %s\n", k, c.Annotations[k]) - } - } - fmt.Println() - } - return nil -} - -var inspectContainerCommand = cli.Command{ - Name: "inspect", - Usage: "get container info from crio daemon", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the container", - }, - }, - Action: func(context *cli.Context) error { - ID := context.String("id") - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - c, err := client.New(context.GlobalString("connect")) - if err != nil { - return err - } - - cInfo, err := c.ContainerInfo(ID) - if err != nil { - return err - } - - jsonBytes, err := json.MarshalIndent(cInfo, "", " ") - if err != nil { - return err - } - fmt.Println(string(jsonBytes)) - return nil - }, -} diff --git a/cmd/crioctl/image.go b/cmd/crioctl/image.go deleted file mode 100644 index 426c67e9..00000000 --- a/cmd/crioctl/image.go +++ /dev/null @@ -1,173 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/urfave/cli" - "golang.org/x/net/context" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" -) - -var imageCommand = cli.Command{ - Name: "image", - Subcommands: []cli.Command{ - pullImageCommand, - listImageCommand, - imageStatusCommand, - removeImageCommand, - }, -} - -var pullImageCommand = cli.Command{ - Name: "pull", - Usage: "pull an image", - Action: func(context *cli.Context) error { - // Set up a connection to the server. 
- conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewImageServiceClient(conn) - - _, err = PullImage(client, context.Args().Get(0)) - if err != nil { - return fmt.Errorf("pulling image failed: %v", err) - } - return nil - }, -} - -var listImageCommand = cli.Command{ - Name: "list", - Usage: "list images", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "quiet", - Usage: "list only image IDs", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewImageServiceClient(conn) - - r, err := ListImages(client, context.Args().Get(0)) - if err != nil { - return fmt.Errorf("listing images failed: %v", err) - } - quiet := context.Bool("quiet") - for _, image := range r.Images { - if quiet { - fmt.Printf("%s\n", image.Id) - continue - } - fmt.Printf("ID: %s\n", image.Id) - for _, tag := range image.RepoTags { - fmt.Printf("Tag: %s\n", tag) - } - for _, digest := range image.RepoDigests { - fmt.Printf("Digest: %s\n", digest) - } - if image.Size_ != 0 { - fmt.Printf("Size: %d\n", image.Size_) - } - } - return nil - }, -} - -var imageStatusCommand = cli.Command{ - Name: "status", - Usage: "return the status of an image", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Usage: "id of the image", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewImageServiceClient(conn) - - r, err := ImageStatus(client, context.String("id")) - if err != nil { - return fmt.Errorf("image status request failed: %v", err) - } - image := r.Image - if image == nil { - return fmt.Errorf("no such image present") - } - fmt.Printf("ID: %s\n", image.Id) - for _, tag := range image.RepoTags { - fmt.Printf("Tag: %s\n", tag) - } - for _, digest := range image.RepoDigests { - fmt.Printf("Digest: %s\n", digest) - } - fmt.Printf("Size: %d\n", image.Size_) - return nil - }, -} -var removeImageCommand = cli.Command{ - Name: "remove", - Usage: "remove an image", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the image", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewImageServiceClient(conn) - - _, err = RemoveImage(client, context.String("id")) - if err != nil { - return fmt.Errorf("removing the image failed: %v", err) - } - return nil - }, -} - -// PullImage sends a PullImageRequest to the server, and parses -// the returned PullImageResponse. -func PullImage(client pb.ImageServiceClient, image string) (*pb.PullImageResponse, error) { - return client.PullImage(context.Background(), &pb.PullImageRequest{Image: &pb.ImageSpec{Image: image}}) -} - -// ListImages sends a ListImagesRequest to the server, and parses -// the returned ListImagesResponse. 
-func ListImages(client pb.ImageServiceClient, image string) (*pb.ListImagesResponse, error) { - return client.ListImages(context.Background(), &pb.ListImagesRequest{Filter: &pb.ImageFilter{Image: &pb.ImageSpec{Image: image}}}) -} - -// ImageStatus sends an ImageStatusRequest to the server, and parses -// the returned ImageStatusResponse. -func ImageStatus(client pb.ImageServiceClient, image string) (*pb.ImageStatusResponse, error) { - return client.ImageStatus(context.Background(), &pb.ImageStatusRequest{Image: &pb.ImageSpec{Image: image}}) -} - -// RemoveImage sends a RemoveImageRequest to the server, and parses -// the returned RemoveImageResponse. -func RemoveImage(client pb.ImageServiceClient, image string) (*pb.RemoveImageResponse, error) { - if image == "" { - return nil, fmt.Errorf("ID cannot be empty") - } - return client.RemoveImage(context.Background(), &pb.RemoveImageRequest{Image: &pb.ImageSpec{Image: image}}) -} diff --git a/cmd/crioctl/info.go b/cmd/crioctl/info.go deleted file mode 100644 index 1f06f594..00000000 --- a/cmd/crioctl/info.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - - "github.com/kubernetes-incubator/cri-o/client" - "github.com/urfave/cli" -) - -var infoCommand = cli.Command{ - Name: "info", - Usage: "get crio daemon info", - Action: func(context *cli.Context) error { - c, err := client.New(context.GlobalString("connect")) - if err != nil { - return err - } - di, err := c.DaemonInfo() - if err != nil { - return err - } - - jsonBytes, err := json.MarshalIndent(di, "", " ") - if err != nil { - return err - } - fmt.Println(string(jsonBytes)) - return nil - }, -} diff --git a/cmd/crioctl/main.go b/cmd/crioctl/main.go deleted file mode 100644 index 3d77867f..00000000 --- a/cmd/crioctl/main.go +++ /dev/null @@ -1,113 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "net" - "os" - "strings" - "time" - - "github.com/sirupsen/logrus" - "github.com/urfave/cli" - "google.golang.org/grpc" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" -) - -// This is populated by the Makefile from the VERSION file -// in the repository -var version = "" - -// gitCommit is the commit that the binary is being built from. -// It will be populated by the Makefile. 
-var gitCommit = "" - -func getClientConnection(context *cli.Context) (*grpc.ClientConn, error) { - conn, err := grpc.Dial(context.GlobalString("connect"), grpc.WithInsecure(), grpc.WithTimeout(context.GlobalDuration("timeout")), - grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("unix", addr, timeout) - })) - if err != nil { - return nil, fmt.Errorf("failed to connect: %v", err) - } - return conn, nil -} - -func openFile(path string) (*os.File, error) { - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("config at %s not found", path) - } - return nil, err - } - return f, nil -} - -func loadPodSandboxConfig(path string) (*pb.PodSandboxConfig, error) { - f, err := openFile(path) - if err != nil { - return nil, err - } - defer f.Close() - - var config pb.PodSandboxConfig - if err := json.NewDecoder(f).Decode(&config); err != nil { - return nil, err - } - return &config, nil -} - -func loadContainerConfig(path string) (*pb.ContainerConfig, error) { - f, err := openFile(path) - if err != nil { - return nil, err - } - defer f.Close() - - var config pb.ContainerConfig - if err := json.NewDecoder(f).Decode(&config); err != nil { - return nil, err - } - return &config, nil -} - -func main() { - app := cli.NewApp() - var v []string - if version != "" { - v = append(v, version) - } - if gitCommit != "" { - v = append(v, fmt.Sprintf("commit: %s", gitCommit)) - } - - app.Name = "crioctl" - app.Usage = "client for crio" - app.Version = strings.Join(v, "\n") - - app.Commands = []cli.Command{ - podSandboxCommand, - containerCommand, - runtimeVersionCommand, - imageCommand, - infoCommand, - } - - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "connect", - Value: "/var/run/crio.sock", - Usage: "Socket to connect to", - }, - cli.DurationFlag{ - Name: "timeout", - Value: 10 * time.Second, - Usage: "Timeout of connecting to server", - }, - } - - if err := app.Run(os.Args); err != nil { - logrus.Fatal(err) - } -} diff --git a/cmd/crioctl/sandbox.go b/cmd/crioctl/sandbox.go deleted file mode 100644 index e44183be..00000000 --- a/cmd/crioctl/sandbox.go +++ /dev/null @@ -1,386 +0,0 @@ -package main - -import ( - "fmt" - "log" - "sort" - "strings" - "time" - - "github.com/urfave/cli" - "golang.org/x/net/context" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" -) - -var podSandboxCommand = cli.Command{ - Name: "pod", - Subcommands: []cli.Command{ - runPodSandboxCommand, - stopPodSandboxCommand, - removePodSandboxCommand, - podSandboxStatusCommand, - listPodSandboxCommand, - }, -} - -var runPodSandboxCommand = cli.Command{ - Name: "run", - Usage: "run a pod", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "config", - Value: "", - Usage: "the path of a pod sandbox config file", - }, - cli.StringFlag{ - Name: "name", - Value: "", - Usage: "the name of the pod sandbox", - }, - cli.StringSliceFlag{ - Name: "label", - Usage: "add key=value labels to the container", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. 
- conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - opts := createOptions{ - configPath: context.String("config"), - name: context.String("name"), - labels: make(map[string]string), - } - - for _, l := range context.StringSlice("label") { - pair := strings.Split(l, "=") - if len(pair) != 2 { - return fmt.Errorf("incorrectly specified label: %v", l) - } - opts.labels[pair[0]] = pair[1] - } - - // Test RuntimeServiceClient.RunPodSandbox - err = RunPodSandbox(client, opts) - if err != nil { - return fmt.Errorf("Creating the pod sandbox failed: %v", err) - } - return nil - }, -} - -var stopPodSandboxCommand = cli.Command{ - Name: "stop", - Usage: "stop a pod sandbox", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the pod sandbox", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = StopPodSandbox(client, context.String("id")) - if err != nil { - return fmt.Errorf("stopping the pod sandbox failed: %v", err) - } - return nil - }, -} - -var removePodSandboxCommand = cli.Command{ - Name: "remove", - Usage: "remove a pod sandbox", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the pod sandbox", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = RemovePodSandbox(client, context.String("id")) - if err != nil { - return fmt.Errorf("removing the pod sandbox failed: %v", err) - } - return nil - }, -} - -var podSandboxStatusCommand = cli.Command{ - Name: "status", - Usage: "return the status of a pod", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "id of the pod", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - err = PodSandboxStatus(client, context.String("id")) - if err != nil { - return fmt.Errorf("getting the pod sandbox status failed: %v", err) - } - return nil - }, -} - -var listPodSandboxCommand = cli.Command{ - Name: "list", - Usage: "list pod sandboxes", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "id", - Value: "", - Usage: "filter by pod sandbox id", - }, - cli.StringFlag{ - Name: "state", - Value: "", - Usage: "filter by pod sandbox state", - }, - cli.StringSliceFlag{ - Name: "label", - Usage: "filter by key=value label", - }, - cli.BoolFlag{ - Name: "quiet", - Usage: "list only pod IDs", - }, - }, - Action: func(context *cli.Context) error { - // Set up a connection to the server. 
- conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - opts := listOptions{ - id: context.String("id"), - state: context.String("state"), - quiet: context.Bool("quiet"), - labels: make(map[string]string), - } - - for _, l := range context.StringSlice("label") { - pair := strings.Split(l, "=") - if len(pair) != 2 { - return fmt.Errorf("incorrectly specified label: %v", l) - } - opts.labels[pair[0]] = pair[1] - } - - err = ListPodSandboxes(client, opts) - if err != nil { - return fmt.Errorf("listing pod sandboxes failed: %v", err) - } - return nil - }, -} - -// RunPodSandbox sends a RunPodSandboxRequest to the server, and parses -// the returned RunPodSandboxResponse. -func RunPodSandbox(client pb.RuntimeServiceClient, opts createOptions) error { - config, err := loadPodSandboxConfig(opts.configPath) - if err != nil { - return err - } - - // Override the name by the one specified through CLI - if opts.name != "" { - config.Metadata.Name = opts.name - } - - for k, v := range opts.labels { - config.Labels[k] = v - } - - r, err := client.RunPodSandbox(context.Background(), &pb.RunPodSandboxRequest{Config: config}) - if err != nil { - return err - } - fmt.Println(r.PodSandboxId) - return nil -} - -// StopPodSandbox sends a StopPodSandboxRequest to the server, and parses -// the returned StopPodSandboxResponse. -func StopPodSandbox(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - _, err := client.StopPodSandbox(context.Background(), &pb.StopPodSandboxRequest{PodSandboxId: ID}) - if err != nil { - return err - } - fmt.Println(ID) - return nil -} - -// RemovePodSandbox sends a RemovePodSandboxRequest to the server, and parses -// the returned RemovePodSandboxResponse. -func RemovePodSandbox(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - _, err := client.RemovePodSandbox(context.Background(), &pb.RemovePodSandboxRequest{PodSandboxId: ID}) - if err != nil { - return err - } - fmt.Println(ID) - return nil -} - -// PodSandboxStatus sends a PodSandboxStatusRequest to the server, and parses -// the returned PodSandboxStatusResponse. 
-func PodSandboxStatus(client pb.RuntimeServiceClient, ID string) error { - if ID == "" { - return fmt.Errorf("ID cannot be empty") - } - r, err := client.PodSandboxStatus(context.Background(), &pb.PodSandboxStatusRequest{PodSandboxId: ID}) - if err != nil { - return err - } - fmt.Printf("ID: %s\n", r.Status.Id) - if r.Status.Metadata != nil { - if r.Status.Metadata.Name != "" { - fmt.Printf("Name: %s\n", r.Status.Metadata.Name) - } - if r.Status.Metadata.Uid != "" { - fmt.Printf("UID: %s\n", r.Status.Metadata.Uid) - } - if r.Status.Metadata.Namespace != "" { - fmt.Printf("Namespace: %s\n", r.Status.Metadata.Namespace) - } - fmt.Printf("Attempt: %v\n", r.Status.Metadata.Attempt) - } - fmt.Printf("Status: %s\n", r.Status.State) - ctm := time.Unix(0, r.Status.CreatedAt) - fmt.Printf("Created: %v\n", ctm) - if r.Status.Network != nil { - fmt.Printf("IP Address: %v\n", r.Status.Network.Ip) - } - if r.Status.Labels != nil { - fmt.Println("Labels:") - for _, k := range getSortedKeys(r.Status.Labels) { - fmt.Printf("\t%s -> %s\n", k, r.Status.Labels[k]) - } - } - if r.Status.Annotations != nil { - fmt.Println("Annotations:") - for _, k := range getSortedKeys(r.Status.Annotations) { - fmt.Printf("\t%s -> %s\n", k, r.Status.Annotations[k]) - } - } - return nil -} - -// ListPodSandboxes sends a ListPodSandboxRequest to the server, and parses -// the returned ListPodSandboxResponse. -func ListPodSandboxes(client pb.RuntimeServiceClient, opts listOptions) error { - filter := &pb.PodSandboxFilter{} - if opts.id != "" { - filter.Id = opts.id - } - if opts.state != "" { - st := &pb.PodSandboxStateValue{} - st.State = pb.PodSandboxState_SANDBOX_NOTREADY - switch opts.state { - case "ready": - st.State = pb.PodSandboxState_SANDBOX_READY - filter.State = st - case "notready": - st.State = pb.PodSandboxState_SANDBOX_NOTREADY - filter.State = st - default: - log.Fatalf("--state should be ready or notready") - } - } - if opts.labels != nil { - filter.LabelSelector = opts.labels - } - r, err := client.ListPodSandbox(context.Background(), &pb.ListPodSandboxRequest{ - Filter: filter, - }) - if err != nil { - return err - } - for _, pod := range r.Items { - if opts.quiet { - fmt.Println(pod.Id) - continue - } - fmt.Printf("ID: %s\n", pod.Id) - if pod.Metadata != nil { - if pod.Metadata.Name != "" { - fmt.Printf("Name: %s\n", pod.Metadata.Name) - } - if pod.Metadata.Uid != "" { - fmt.Printf("UID: %s\n", pod.Metadata.Uid) - } - if pod.Metadata.Namespace != "" { - fmt.Printf("Namespace: %s\n", pod.Metadata.Namespace) - } - fmt.Printf("Attempt: %v\n", pod.Metadata.Attempt) - } - fmt.Printf("Status: %s\n", pod.State) - ctm := time.Unix(0, pod.CreatedAt) - fmt.Printf("Created: %v\n", ctm) - if pod.Labels != nil { - fmt.Println("Labels:") - for _, k := range getSortedKeys(pod.Labels) { - fmt.Printf("\t%s -> %s\n", k, pod.Labels[k]) - } - } - if pod.Annotations != nil { - fmt.Println("Annotations:") - for _, k := range getSortedKeys(pod.Annotations) { - fmt.Printf("\t%s -> %s\n", k, pod.Annotations[k]) - } - } - fmt.Println() - } - return nil -} - -func getSortedKeys(m map[string]string) []string { - var keys []string - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - return keys -} diff --git a/cmd/crioctl/system.go b/cmd/crioctl/system.go deleted file mode 100644 index 7e04161c..00000000 --- a/cmd/crioctl/system.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/urfave/cli" - "golang.org/x/net/context" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" 
-) - -var runtimeVersionCommand = cli.Command{ - Name: "runtimeversion", - Usage: "get runtime version information", - Action: func(context *cli.Context) error { - // Set up a connection to the server. - conn, err := getClientConnection(context) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - client := pb.NewRuntimeServiceClient(conn) - - // Test RuntimeServiceClient.Version - version := "v1alpha1" - err = Version(client, version) - if err != nil { - return fmt.Errorf("Getting the runtime version failed: %v", err) - } - return nil - }, -} - -// Version sends a VersionRequest to the server, and parses the returned VersionResponse. -func Version(client pb.RuntimeServiceClient, version string) error { - r, err := client.Version(context.Background(), &pb.VersionRequest{Version: version}) - if err != nil { - return err - } - fmt.Printf("VersionResponse: Version: %s, RuntimeName: %s, RuntimeVersion: %s, RuntimeApiVersion: %s\n", r.Version, r.RuntimeName, r.RuntimeVersion, r.RuntimeApiVersion) - return nil -} diff --git a/cmd/kpod/README.md b/cmd/kpod/README.md deleted file mode 100644 index 7a79e489..00000000 --- a/cmd/kpod/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# kpod - Simple debugging tool for pods and images -kpod is a simple client-only tool to help with debugging issues when daemons such as the CRI runtime and the kubelet are not responding or -failing. A shared API layer could be created to share code between the daemon and kpod. kpod does not require any daemon to be running. kpod -utilizes the same underlying components that crio uses, i.e. containers/image, containers/storage, oci-runtime-tool/generate, runc or -any other OCI-compatible runtime. kpod shares state with crio and so has the capability to debug pods/images created by crio. - -## Use cases -1. List pods. -2. Launch simple pods (that require no daemon support). -3. Exec commands in a container in a pod. -4. Launch additional containers in a pod. -5. List images. -6. Remove images not in use. -7. Pull images. -8. Check image size. -9. Report pod disk resource usage.
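A note on the README deleted above: its central design point is that kpod needs no daemon because it opens the same containers/storage store that crio writes to; the `getStore` helper in `cmd/kpod/common.go` (deleted just below) is that code path. The following is a minimal sketch of that daemonless model, not code from this PR: it reuses only the containers/storage calls visible in this diff, and the two paths are assumed stock defaults (what crio's `--root` and `--runroot` resolve to on a typical install).

```go
package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	// Same pattern as kpod's getStore: start from the library defaults,
	// then point GraphRoot/RunRoot at the storage crio is using.
	options := storage.DefaultStoreOptions
	options.GraphRoot = "/var/lib/containers/storage" // assumed default (--root)
	options.RunRoot = "/var/run/containers/storage"   // assumed default (--runroot)

	store, err := storage.GetStore(options)
	if err != nil {
		panic(err)
	}
	// Shutdown(false) releases the store without force-unmounting layers,
	// mirroring kpod's shutdownStores helper.
	defer store.Shutdown(false)

	// Reading images straight from storage is what lets "list images"
	// work with no daemon running.
	images, err := store.Images()
	if err != nil {
		panic(err)
	}
	for _, img := range images {
		fmt.Println(img.ID, img.Names)
	}
}
```

Because crio and kpod both go through this single storage layer, the pod and image state they see stays consistent, which is what makes the use cases listed above workable on a wedged node.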
diff --git a/cmd/kpod/common.go b/cmd/kpod/common.go deleted file mode 100644 index f77b3fd1..00000000 --- a/cmd/kpod/common.go +++ /dev/null @@ -1,135 +0,0 @@ -package main - -import ( - "os" - "reflect" - "regexp" - "strings" - - is "github.com/containers/image/storage" - "github.com/containers/storage" - "github.com/fatih/camelcase" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/libpod" - "github.com/kubernetes-incubator/cri-o/server" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - stores = make(map[storage.Store]struct{}) -) - -func getStore(c *libkpod.Config) (storage.Store, error) { - options := storage.DefaultStoreOptions - options.GraphRoot = c.Root - options.RunRoot = c.RunRoot - options.GraphDriverName = c.Storage - options.GraphDriverOptions = c.StorageOptions - - store, err := storage.GetStore(options) - if err != nil { - return nil, err - } - is.Transport.SetStore(store) - stores[store] = struct{}{} - return store, nil -} - -func getRuntime(c *cli.Context) (*libpod.Runtime, error) { - - config, err := getConfig(c) - if err != nil { - return nil, errors.Wrapf(err, "could not get config") - } - - options := storage.DefaultStoreOptions - options.GraphRoot = config.Root - options.RunRoot = config.RunRoot - options.GraphDriverName = config.Storage - options.GraphDriverOptions = config.StorageOptions - - return libpod.NewRuntime(libpod.WithStorageConfig(options)) -} - -func shutdownStores() { - for store := range stores { - if _, err := store.Shutdown(false); err != nil { - break - } - } -} - -func getConfig(c *cli.Context) (*libkpod.Config, error) { - config := libkpod.DefaultConfig() - var configFile string - if c.GlobalIsSet("config") { - configFile = c.GlobalString("config") - } else if _, err := os.Stat(server.CrioConfigPath); err == nil { - configFile = server.CrioConfigPath - } - // load and merge the configfile from the commandline or use - // the default crio config file - if configFile != "" { - err := config.UpdateFromFile(configFile) - if err != nil { - return config, err - } - } - if c.GlobalIsSet("root") { - config.Root = c.GlobalString("root") - } - if c.GlobalIsSet("runroot") { - config.RunRoot = c.GlobalString("runroot") - } - - if c.GlobalIsSet("storage-driver") { - config.Storage = c.GlobalString("storage-driver") - } - if c.GlobalIsSet("storage-opt") { - opts := c.GlobalStringSlice("storage-opt") - if len(opts) > 0 { - config.StorageOptions = opts - } - } - if c.GlobalIsSet("runtime") { - config.Runtime = c.GlobalString("runtime") - } - return config, nil -} - -func splitCamelCase(src string) string { - entries := camelcase.Split(src) - return strings.Join(entries, " ") -} - -// validateFlags searches for StringFlags or StringSlice flags that never had -// a value set. This commonly occurs when the CLI mistakenly takes the next -// option and uses it as a value. 
-func validateFlags(c *cli.Context, flags []cli.Flag) error { - for _, flag := range flags { - switch reflect.TypeOf(flag).String() { - case "cli.StringSliceFlag": - { - f := flag.(cli.StringSliceFlag) - name := strings.Split(f.Name, ",") - val := c.StringSlice(name[0]) - for _, v := range val { - if ok, _ := regexp.MatchString("^-.+", v); ok { - return errors.Errorf("option --%s requires a value", name[0]) - } - } - } - case "cli.StringFlag": - { - f := flag.(cli.StringFlag) - name := strings.Split(f.Name, ",") - val := c.String(name[0]) - if ok, _ := regexp.MatchString("^-.+", val); ok { - return errors.Errorf("option --%s requires a value", name[0]) - } - } - } - } - return nil -} diff --git a/cmd/kpod/common_test.go b/cmd/kpod/common_test.go deleted file mode 100644 index 663bc41e..00000000 --- a/cmd/kpod/common_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package main - -import ( - "os/exec" - "os/user" - "testing" - - "flag" - - "github.com/urfave/cli" -) - -func TestGetStore(t *testing.T) { - t.Skip("FIX THIS!") - - //cmd/kpod/common_test.go:27: cannot use c (type *cli.Context) as type *libkpod.Config in argument to getStore - - // Make sure the tests are running as root - skipTestIfNotRoot(t) - - set := flag.NewFlagSet("test", 0) - globalSet := flag.NewFlagSet("test", 0) - globalSet.String("root", "", "path to the root directory in which data, including images, is stored") - globalCtx := cli.NewContext(nil, globalSet, nil) - command := cli.Command{Name: "imagesCommand"} - c := cli.NewContext(nil, set, globalCtx) - c.Command = command - - //_, err := getStore(c) - //if err != nil { - //t.Error(err) - //} -} - -func skipTestIfNotRoot(t *testing.T) { - u, err := user.Current() - if err != nil { - t.Skip("Could not determine user. Running without root may cause tests to fail") - } else if u.Uid != "0" { - t.Skip("tests will fail unless run as root") - } -} - -func pullTestImage(name string) error { - cmd := exec.Command("crioctl", "image", "pull", name) - err := cmd.Run() - if err != nil { - return err - } - return nil -} diff --git a/cmd/kpod/diff.go b/cmd/kpod/diff.go deleted file mode 100644 index c28bdfce..00000000 --- a/cmd/kpod/diff.go +++ /dev/null @@ -1,128 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/containers/storage/pkg/archive" - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -type diffJSONOutput struct { - Changed []string `json:"changed,omitempty"` - Added []string `json:"added,omitempty"` - Deleted []string `json:"deleted,omitempty"` -} - -type diffOutputParams struct { - Change archive.ChangeType - Path string -} - -type stdoutStruct struct { - output []diffOutputParams -} - -func (so stdoutStruct) Out() error { - for _, d := range so.output { - fmt.Printf("%s %s\n", d.Change, d.Path) - } - return nil -} - -var ( - diffFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "archive", - Usage: "Save the diff as a tar archive", - Hidden: true, - }, - cli.StringFlag{ - Name: "format", - Usage: "Change the output format.", - }, - } - diffDescription = fmt.Sprint(`Displays changes on a container or image's filesystem. 
The - container or image will be compared to its parent layer`) - - diffCommand = cli.Command{ - Name: "diff", - Usage: "Inspect changes on container's file systems", - Description: diffDescription, - Flags: diffFlags, - Action: diffCmd, - ArgsUsage: "ID-NAME", - } -) - -func formatJSON(output []diffOutputParams) (diffJSONOutput, error) { - jsonStruct := diffJSONOutput{} - for _, output := range output { - switch output.Change { - case archive.ChangeModify: - jsonStruct.Changed = append(jsonStruct.Changed, output.Path) - case archive.ChangeAdd: - jsonStruct.Added = append(jsonStruct.Added, output.Path) - case archive.ChangeDelete: - jsonStruct.Deleted = append(jsonStruct.Deleted, output.Path) - default: - return jsonStruct, errors.Errorf("output kind %q not recognized", output.Change.String()) - } - } - return jsonStruct, nil -} - -func diffCmd(c *cli.Context) error { - if err := validateFlags(c, diffFlags); err != nil { - return err - } - - if len(c.Args()) != 1 { - return errors.Errorf("container, image, or layer name must be specified: kpod diff [options [...]] ID-NAME") - } - - runtime, err := getRuntime(c) - if err != nil { - return errors.Wrapf(err, "could not get runtime") - } - defer runtime.Shutdown(false) - - to := c.Args().Get(0) - changes, err := runtime.GetDiff("", to) - if err != nil { - return errors.Wrapf(err, "could not get changes for %q", to) - } - - diffOutput := []diffOutputParams{} - outputFormat := c.String("format") - - for _, change := range changes { - - params := diffOutputParams{ - Change: change.Kind, - Path: change.Path, - } - diffOutput = append(diffOutput, params) - } - - var out formats.Writer - - if outputFormat != "" { - switch outputFormat { - case formats.JSONString: - data, err := formatJSON(diffOutput) - if err != nil { - return err - } - out = formats.JSONStruct{Output: data} - default: - return errors.New("only valid format for diff is 'json'") - } - } else { - out = stdoutStruct{output: diffOutput} - } - formats.Writer(out).Out() - - return nil -} diff --git a/cmd/kpod/docker/types.go b/cmd/kpod/docker/types.go deleted file mode 100644 index a7e45655..00000000 --- a/cmd/kpod/docker/types.go +++ /dev/null @@ -1,271 +0,0 @@ -package docker - -// -// Types extracted from Docker -// - -import ( - "time" - - "github.com/containers/image/pkg/strslice" - "github.com/opencontainers/go-digest" -) - -// TypeLayers github.com/docker/docker/image/rootfs.go -const TypeLayers = "layers" - -// V2S2MediaTypeManifest github.com/docker/distribution/manifest/schema2/manifest.go -const V2S2MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" - -// V2S2MediaTypeImageConfig github.com/docker/distribution/manifest/schema2/manifest.go -const V2S2MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json" - -// V2S2MediaTypeLayer github.com/docker/distribution/manifest/schema2/manifest.go -const V2S2MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" - -// V2S2MediaTypeUncompressedLayer github.com/docker/distribution/manifest/schema2/manifest.go -const V2S2MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar" - -// V2S2RootFS describes images root filesystem -// This is currently a placeholder that only supports layers. In the future -// this can be made into an interface that supports different implementations. 
-// github.com/docker/docker/image/rootfs.go -type V2S2RootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` -} - -// V2S2History stores build commands that were used to create an image -// github.com/docker/docker/image/image.go -type V2S2History struct { - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building the image - CreatedBy string `json:"created_by,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// ID is the content-addressable ID of an image. -// github.com/docker/docker/image/image.go -type ID digest.Digest - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -// github.com/docker/docker/api/types/container/config.go -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// PortSet is a collection of structs indexed by Port -// github.com/docker/go-connections/nat/nat.go -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -// github.com/docker/go-connections/nat/nat.go -type Port string - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -// github.com/docker/docker/api/types/container/config.go -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
- Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// V1Compatibility - For non-top-level layers, create fake V1Compatibility -// strings that fit the format and don't collide with anything else, but -// don't result in runnable images on their own. -// github.com/docker/distribution/manifest/schema1/config_builder.go -type V1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig struct { - Cmd []string - } `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` -} - -// V1Image stores the V1 image configuration. 
-// github.com/docker/docker/image/image.go -type V1Image struct { - // ID is a unique 64 character identifier of the image - ID string `json:"id,omitempty"` - // Parent is the ID of the parent image - Parent string `json:"parent,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Container is the id of the container used to commit - Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig Config `json:"container_config,omitempty"` - // DockerVersion specifies the version of Docker that was used to build the image - DockerVersion string `json:"docker_version,omitempty"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *Config `json:"config,omitempty"` - // Architecture is the hardware that the image is build and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` - // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` -} - -// V2Image stores the image configuration -// github.com/docker/docker/image/image.go -type V2Image struct { - V1Image - Parent ID `json:"parent,omitempty"` - RootFS *V2S2RootFS `json:"rootfs,omitempty"` - History []V2S2History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - - // rawJSON caches the immutable JSON associated with this image. - //rawJSON []byte - - // computedID is the ID computed from the hash of the image config. - // Not to be confused with the legacy V1 ID in V1Image. - //computedID ID -} - -// V2Versioned provides a struct with the manifest schemaVersion and mediaType. -// Incoming content with unknown schema version can be decoded against this -// struct to check the version. -// github.com/docker/distribution/manifest/versioned.go -type V2Versioned struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` - - // MediaType is the media type of this schema. - MediaType string `json:"mediaType,omitempty"` -} - -// V2S1FSLayer is a container struct for BlobSums defined in an image manifest -// github.com/docker/distribution/manifest/schema1/manifest.go -type V2S1FSLayer struct { - // BlobSum is the tarsum of the referenced filesystem image layer - BlobSum digest.Digest `json:"blobSum"` -} - -// V2S1History stores unstructured v1 compatibility information -// github.com/docker/distribution/manifest/schema1/manifest.go -type V2S1History struct { - // V1Compatibility is the raw v1 compatibility information - V1Compatibility string `json:"v1Compatibility"` -} - -// V2S1Manifest provides the base accessible fields for working with V2 image -// format in the registry. 
-// github.com/docker/distribution/manifest/schema1/manifest.go -type V2S1Manifest struct { - V2Versioned - - // Name is the name of the image's repository - Name string `json:"name"` - - // Tag is the tag of the image specified by this manifest - Tag string `json:"tag"` - - // Architecture is the host architecture on which this image is intended to - // run - Architecture string `json:"architecture"` - - // FSLayers is a list of filesystem layer blobSums contained in this image - FSLayers []V2S1FSLayer `json:"fsLayers"` - - // History is a list of unstructured historical data for v1 compatibility - History []V2S1History `json:"history"` -} - -// V2S2Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -// github.com/docker/distribution/blobs.go -type V2S2Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -// V2S2Manifest defines a schema2 manifest. -// github.com/docker/distribution/manifest/schema2/manifest.go -type V2S2Manifest struct { - V2Versioned - - // Config references the image configuration as a blob. - Config V2S2Descriptor `json:"config"` - - // Layers lists descriptors for the layers referenced by the - // configuration. - Layers []V2S2Descriptor `json:"layers"` -} diff --git a/cmd/kpod/export.go b/cmd/kpod/export.go deleted file mode 100644 index 94f05ce1..00000000 --- a/cmd/kpod/export.go +++ /dev/null @@ -1,106 +0,0 @@ -package main - -import ( - "io" - "os" - - "fmt" - - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -type exportOptions struct { - output string - container string -} - -var ( - exportFlags = []cli.Flag{ - cli.StringFlag{ - Name: "output, o", - Usage: "Write to a file, default is STDOUT", - Value: "/dev/stdout", - }, - } - exportDescription = "Exports container's filesystem contents as a tar archive" + - " and saves it on the local machine." 
- exportCommand = cli.Command{ - Name: "export", - Usage: "Export container's filesystem contents as a tar archive", - Description: exportDescription, - Flags: exportFlags, - Action: exportCmd, - ArgsUsage: "CONTAINER", - } -) - -// exportCmd saves a container to a tarball on disk -func exportCmd(c *cli.Context) error { - args := c.Args() - if len(args) == 0 { - return errors.Errorf("container id must be specified") - } - if len(args) > 1 { - return errors.Errorf("too many arguments given, need 1 at most.") - } - container := args[0] - if err := validateFlags(c, exportFlags); err != nil { - return err - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - store, err := getStore(config) - if err != nil { - return err - } - - output := c.String("output") - if output == "/dev/stdout" { - file := os.Stdout - if logrus.IsTerminal(file) { - return errors.Errorf("refusing to export to terminal. Use -o flag or redirect") - } - } - - opts := exportOptions{ - output: output, - container: container, - } - - return exportContainer(store, opts) -} - -// exportContainer exports the contents of a container and saves it as -// a tarball on disk -func exportContainer(store storage.Store, opts exportOptions) error { - mountPoint, err := store.Mount(opts.container, "") - if err != nil { - return errors.Wrapf(err, "error finding container %q", opts.container) - } - defer func() { - if err := store.Unmount(opts.container); err != nil { - fmt.Printf("error unmounting container %q: %v\n", opts.container, err) - } - }() - - input, err := archive.Tar(mountPoint, archive.Uncompressed) - if err != nil { - return errors.Wrapf(err, "error reading container directory %q", opts.container) - } - - outFile, err := os.Create(opts.output) - if err != nil { - return errors.Wrapf(err, "error creating file %q", opts.output) - } - defer outFile.Close() - - _, err = io.Copy(outFile, input) - return err -} diff --git a/cmd/kpod/formats/formats.go b/cmd/kpod/formats/formats.go deleted file mode 100644 index 007f09c6..00000000 --- a/cmd/kpod/formats/formats.go +++ /dev/null @@ -1,132 +0,0 @@ -package formats - -import ( - "encoding/json" - "fmt" - "os" - "strings" - "text/tabwriter" - "text/template" - - "github.com/ghodss/yaml" - "github.com/pkg/errors" -) - -const ( - // JSONString const to save on duplicate variable names - JSONString = "json" - // IDString const to save on duplicates for Go templates - IDString = "{{.ID}}" -) - -// Writer interface for outputs -type Writer interface { - Out() error -} - -// JSONStructArray for JSON output -type JSONStructArray struct { - Output []interface{} -} - -// StdoutTemplateArray for Go template output -type StdoutTemplateArray struct { - Output []interface{} - Template string - Fields map[string]string -} - -// JSONStruct for JSON output -type JSONStruct struct { - Output interface{} -} - -// StdoutTemplate for Go template output -type StdoutTemplate struct { - Output interface{} - Template string - Fields map[string]string -} - -// YAMLStruct for YAML output -type YAMLStruct struct { - Output interface{} -} - -// Out method for JSON Arrays -func (j JSONStructArray) Out() error { - data, err := json.MarshalIndent(j.Output, "", " ") - if err != nil { - return err - } - fmt.Printf("%s\n", data) - return nil -} - -// Out method for Go templates -func (t StdoutTemplateArray) Out() error { - w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0) - if strings.HasPrefix(t.Template, "table") { - // replace any spaces with tabs in template so 
that tabwriter can align it - t.Template = strings.Replace(strings.TrimSpace(t.Template[5:]), " ", "\t", -1) - headerTmpl, err := template.New("header").Funcs(headerFunctions).Parse(t.Template) - if err != nil { - return errors.Wrapf(err, "Template parsing error") - } - err = headerTmpl.Execute(w, t.Fields) - if err != nil { - return err - } - fmt.Fprintln(w, "") - } - t.Template = strings.Replace(t.Template, " ", "\t", -1) - tmpl, err := template.New("image").Funcs(basicFunctions).Parse(t.Template) - if err != nil { - return errors.Wrapf(err, "Template parsing error") - } - for _, img := range t.Output { - basicTmpl := tmpl.Funcs(basicFunctions) - err = basicTmpl.Execute(w, img) - if err != nil { - return err - } - fmt.Fprintln(w, "") - } - return w.Flush() -} - -// Out method for JSON struct -func (j JSONStruct) Out() error { - data, err := json.MarshalIndent(j.Output, "", " ") - if err != nil { - return err - } - fmt.Printf("%s\n", data) - return nil -} - -// Out method for Go templates -func (t StdoutTemplate) Out() error { - tmpl, err := template.New("image").Parse(t.Template) - if err != nil { - return errors.Wrapf(err, "template parsing error") - } - err = tmpl.Execute(os.Stdout, t.Output) - if err != nil { - return err - } - fmt.Println() - return nil -} - -// Out method for YAML -func (y YAMLStruct) Out() error { - var buf []byte - var err error - buf, err = yaml.Marshal(y.Output) - if err != nil { - return err - } - fmt.Println(string(buf)) - return nil -} diff --git a/cmd/kpod/history.go b/cmd/kpod/history.go deleted file mode 100644 index dd0da38a..00000000 --- a/cmd/kpod/history.go +++ /dev/null @@ -1,243 +0,0 @@ -package main - -import ( - "reflect" - "strconv" - "strings" - "time" - - "github.com/containers/image/types" - units "github.com/docker/go-units" - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -const ( - createdByTruncLength = 45 - idTruncLength = 13 -) - -// historyTemplateParams stores info about each layer -type historyTemplateParams struct { - ID string - Created string - CreatedBy string - Size string - Comment string -} - -// historyJSONParams is only used when the JSON format is specified, -// and is better for data processing from JSON. -// historyJSONParams will be populated by data from v1.History and types.BlobInfo, -// the members of the struct are the same data types as their sources. -type historyJSONParams struct { - ID string `json:"id"` - Created *time.Time `json:"created"` - CreatedBy string `json:"createdBy"` - Size int64 `json:"size"` - Comment string `json:"comment"` -} - -// historyOptions stores cli flag values -type historyOptions struct { - human bool - noTrunc bool - quiet bool - format string -} - -var ( - historyFlags = []cli.Flag{ - cli.BoolTFlag{ - Name: "human, H", - Usage: "Display sizes and dates in human readable format", - }, - cli.BoolFlag{ - Name: "no-trunc, notruncate", - Usage: "Do not truncate the output", - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "Display the numeric IDs only", - }, - cli.StringFlag{ - Name: "format", - Usage: "Change the output to JSON or a Go template", - }, - } - - historyDescription = "Displays the history of an image. The information can be printed out in an easy to read, " + - "or user specified format, and can be truncated."
- historyCommand = cli.Command{ - Name: "history", - Usage: "Show history of a specified image", - Description: historyDescription, - Flags: historyFlags, - Action: historyCmd, - ArgsUsage: "", - } -) - -func historyCmd(c *cli.Context) error { - if err := validateFlags(c, historyFlags); err != nil { - return err - } - - runtime, err := getRuntime(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - defer runtime.Shutdown(false) - - format := genHistoryFormat(c.Bool("quiet")) - if c.IsSet("format") { - format = c.String("format") - } - - args := c.Args() - if len(args) == 0 { - return errors.Errorf("an image name must be specified") - } - if len(args) > 1 { - return errors.Errorf("Kpod history takes at most 1 argument") - } - imgName := args[0] - - opts := historyOptions{ - human: c.BoolT("human"), - noTrunc: c.Bool("no-trunc"), - quiet: c.Bool("quiet"), - format: format, - } - - history, layers, imageID, err := runtime.GetHistory(imgName) - if err != nil { - return errors.Wrapf(err, "error getting history of image %q", imgName) - } - - return generateHistoryOutput(history, layers, imageID, opts) -} - -func genHistoryFormat(quiet bool) (format string) { - if quiet { - return formats.IDString - } - return "table {{.ID}}\t{{.Created}}\t{{.CreatedBy}}\t{{.Size}}\t{{.Comment}}\t" -} - -// historyToGeneric makes an empty array of interfaces for output -func historyToGeneric(templParams []historyTemplateParams, JSONParams []historyJSONParams) (genericParams []interface{}) { - if len(templParams) > 0 { - for _, v := range templParams { - genericParams = append(genericParams, interface{}(v)) - } - return - } - for _, v := range JSONParams { - genericParams = append(genericParams, interface{}(v)) - } - return -} - -// generate the header based on the template provided -func (h *historyTemplateParams) headerMap() map[string]string { - v := reflect.Indirect(reflect.ValueOf(h)) - values := make(map[string]string) - for h := 0; h < v.NumField(); h++ { - key := v.Type().Field(h).Name - value := key - values[key] = strings.ToUpper(splitCamelCase(value)) - } - return values -} - -// getHistorytemplateOutput gets the modified history information to be printed in human readable format -func getHistoryTemplateOutput(history []v1.History, layers []types.BlobInfo, imageID string, opts historyOptions) (historyOutput []historyTemplateParams) { - var ( - outputSize string - createdTime string - createdBy string - count = 1 - ) - for i := len(history) - 1; i >= 0; i-- { - if i != len(history)-1 { - imageID = "" - } - if !opts.noTrunc && i == len(history)-1 { - imageID = imageID[:idTruncLength] - } - - var size int64 - if !history[i].EmptyLayer { - size = layers[len(layers)-count].Size - count++ - } - - if opts.human { - createdTime = units.HumanDuration(time.Since((*history[i].Created))) + " ago" - outputSize = units.HumanSize(float64(size)) - } else { - createdTime = (history[i].Created).Format(time.RFC3339) - outputSize = strconv.FormatInt(size, 10) - } - - createdBy = strings.Join(strings.Fields(history[i].CreatedBy), " ") - if !opts.noTrunc && len(createdBy) > createdByTruncLength { - createdBy = createdBy[:createdByTruncLength-3] + "..." 
- } - - params := historyTemplateParams{ - ID: imageID, - Created: createdTime, - CreatedBy: createdBy, - Size: outputSize, - Comment: history[i].Comment, - } - historyOutput = append(historyOutput, params) - } - return -} - -// getHistoryJSONOutput returns the history information in its raw form -func getHistoryJSONOutput(history []v1.History, layers []types.BlobInfo, imageID string) (historyOutput []historyJSONParams) { - count := 1 - for i := len(history) - 1; i >= 0; i-- { - var size int64 - if !history[i].EmptyLayer { - size = layers[len(layers)-count].Size - count++ - } - - params := historyJSONParams{ - ID: imageID, - Created: history[i].Created, - CreatedBy: history[i].CreatedBy, - Size: size, - Comment: history[i].Comment, - } - historyOutput = append(historyOutput, params) - } - return -} - -// generateHistoryOutput generates the history based on the format given -func generateHistoryOutput(history []v1.History, layers []types.BlobInfo, imageID string, opts historyOptions) error { - if len(history) == 0 { - return nil - } - - var out formats.Writer - - switch opts.format { - case formats.JSONString: - historyOutput := getHistoryJSONOutput(history, layers, imageID) - out = formats.JSONStructArray{Output: historyToGeneric([]historyTemplateParams{}, historyOutput)} - default: - historyOutput := getHistoryTemplateOutput(history, layers, imageID, opts) - out = formats.StdoutTemplateArray{Output: historyToGeneric(historyOutput, []historyJSONParams{}), Template: opts.format, Fields: historyOutput[0].headerMap()} - } - - return formats.Writer(out).Out() -} diff --git a/cmd/kpod/images.go b/cmd/kpod/images.go deleted file mode 100644 index d7824ba3..00000000 --- a/cmd/kpod/images.go +++ /dev/null @@ -1,330 +0,0 @@ -package main - -import ( - "fmt" - "reflect" - "strings" - "time" - - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/docker/go-units" - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/kubernetes-incubator/cri-o/libpod" - "github.com/kubernetes-incubator/cri-o/libpod/common" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -type imagesTemplateParams struct { - ID string - Name string - Digest digest.Digest - CreatedAt string - Size string -} - -type imagesJSONParams struct { - ID string `json:"id"` - Name []string `json:"names"` - Digest digest.Digest `json:"digest"` - CreatedAt time.Time `json:"created"` - Size int64 `json:"size"` -} - -type imagesOptions struct { - quiet bool - noHeading bool - noTrunc bool - digests bool - format string -} - -var ( - imagesFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "quiet, q", - Usage: "display only image IDs", - }, - cli.BoolFlag{ - Name: "noheading, n", - Usage: "do not print column headings", - }, - cli.BoolFlag{ - Name: "no-trunc, notruncate", - Usage: "do not truncate output", - }, - cli.BoolFlag{ - Name: "digests", - Usage: "show digests", - }, - cli.StringFlag{ - Name: "format", - Usage: "Change the output format to JSON or a Go template", - }, - cli.StringFlag{ - Name: "filter, f", - Usage: "filter output based on conditions provided (default [])", - }, - } - - imagesDescription = "lists locally stored images." 
- imagesCommand = cli.Command{ - Name: "images", - Usage: "list images in local storage", - Description: imagesDescription, - Flags: imagesFlags, - Action: imagesCmd, - ArgsUsage: "", - } -) - -func imagesCmd(c *cli.Context) error { - if err := validateFlags(c, imagesFlags); err != nil { - return err - } - - runtime, err := getRuntime(c) - if err != nil { - return errors.Wrapf(err, "Could not get runtime") - } - defer runtime.Shutdown(false) - - var format string - if c.IsSet("format") { - format = c.String("format") - } else { - format = genImagesFormat(c.Bool("quiet"), c.Bool("noheading"), c.Bool("digests")) - } - - opts := imagesOptions{ - quiet: c.Bool("quiet"), - noHeading: c.Bool("noheading"), - noTrunc: c.Bool("no-trunc"), - digests: c.Bool("digests"), - format: format, - } - - var imageInput string - if len(c.Args()) == 1 { - imageInput = c.Args().Get(0) - } - if len(c.Args()) > 1 { - return errors.New("'kpod images' requires at most 1 argument") - } - - params, err := runtime.ParseImageFilter(imageInput, c.String("filter")) - if err != nil { - return errors.Wrapf(err, "error parsing filter") - } - - // generate the different filters - labelFilter := generateImagesFilter(params, "label") - beforeImageFilter := generateImagesFilter(params, "before-image") - sinceImageFilter := generateImagesFilter(params, "since-image") - danglingFilter := generateImagesFilter(params, "dangling") - referenceFilter := generateImagesFilter(params, "reference") - imageInputFilter := generateImagesFilter(params, "image-input") - - images, err := runtime.GetImages(params, labelFilter, beforeImageFilter, sinceImageFilter, danglingFilter, referenceFilter, imageInputFilter) - if err != nil { - return errors.Wrapf(err, "could not get list of images matching filter") - } - - return generateImagesOutput(runtime, images, opts) -} - -func genImagesFormat(quiet, noHeading, digests bool) (format string) { - if quiet { - return formats.IDString - } - format = "table {{.ID}}\t{{.Name}}\t" - if noHeading { - format = "{{.ID}}\t{{.Name}}\t" - } - if digests { - format += "{{.Digest}}\t" - } - format += "{{.CreatedAt}}\t{{.Size}}\t" - return -} - -// imagesToGeneric creates an empty array of interfaces for output -func imagesToGeneric(templParams []imagesTemplateParams, JSONParams []imagesJSONParams) (genericParams []interface{}) { - if len(templParams) > 0 { - for _, v := range templParams { - genericParams = append(genericParams, interface{}(v)) - } - return - } - for _, v := range JSONParams { - genericParams = append(genericParams, interface{}(v)) - } - return -} - -// generate the header based on the template provided -func (i *imagesTemplateParams) headerMap() map[string]string { - v := reflect.Indirect(reflect.ValueOf(i)) - values := make(map[string]string) - - for i := 0; i < v.NumField(); i++ { - key := v.Type().Field(i).Name - value := key - if value == "ID" || value == "Name" { - value = "Image" + value - } - values[key] = strings.ToUpper(splitCamelCase(value)) - } - return values -} - -// getImagesTemplateOutput returns the images information to be printed in human readable format -func getImagesTemplateOutput(runtime *libpod.Runtime, images []*storage.Image, opts imagesOptions) (imagesOutput []imagesTemplateParams) { - var ( - lastID string - ) - for _, img := range images { - if opts.quiet && lastID == img.ID { - continue // quiet should not show the same ID multiple times - } - createdTime := img.Created - - imageID := img.ID - if !opts.noTrunc { - imageID = imageID[:idTruncLength] - } - - imageName := 
"" - if len(img.Names) > 0 { - imageName = img.Names[0] - } - - info, imageDigest, size, _ := runtime.InfoAndDigestAndSize(*img) - if info != nil { - createdTime = info.Created - } - - params := imagesTemplateParams{ - ID: imageID, - Name: imageName, - Digest: imageDigest, - CreatedAt: units.HumanDuration(time.Since((createdTime))) + " ago", - Size: units.HumanSize(float64(size)), - } - imagesOutput = append(imagesOutput, params) - } - return -} - -// getImagesJSONOutput returns the images information in its raw form -func getImagesJSONOutput(runtime *libpod.Runtime, images []*storage.Image) (imagesOutput []imagesJSONParams) { - for _, img := range images { - createdTime := img.Created - - info, imageDigest, size, _ := runtime.InfoAndDigestAndSize(*img) - if info != nil { - createdTime = info.Created - } - - params := imagesJSONParams{ - ID: img.ID, - Name: img.Names, - Digest: imageDigest, - CreatedAt: createdTime, - Size: size, - } - imagesOutput = append(imagesOutput, params) - } - return -} - -// generateImagesOutput generates the images based on the format provided -func generateImagesOutput(runtime *libpod.Runtime, images []*storage.Image, opts imagesOptions) error { - if len(images) == 0 { - return nil - } - - var out formats.Writer - - switch opts.format { - case formats.JSONString: - imagesOutput := getImagesJSONOutput(runtime, images) - out = formats.JSONStructArray{Output: imagesToGeneric([]imagesTemplateParams{}, imagesOutput)} - default: - imagesOutput := getImagesTemplateOutput(runtime, images, opts) - out = formats.StdoutTemplateArray{Output: imagesToGeneric(imagesOutput, []imagesJSONParams{}), Template: opts.format, Fields: imagesOutput[0].headerMap()} - - } - - return formats.Writer(out).Out() -} - -// generateImagesFilter returns an ImageFilter based on filterType -// to add more filters, define a new case and write what the ImageFilter function should do -func generateImagesFilter(params *libpod.ImageFilterParams, filterType string) libpod.ImageFilter { - switch filterType { - case "label": - return func(image *storage.Image, info *types.ImageInspectInfo) bool { - if params == nil || params.Label == "" { - return true - } - - pair := strings.SplitN(params.Label, "=", 2) - if val, ok := info.Labels[pair[0]]; ok { - if len(pair) == 2 && val == pair[1] { - return true - } - if len(pair) == 1 { - return true - } - } - return false - } - case "before-image": - return func(image *storage.Image, info *types.ImageInspectInfo) bool { - if params == nil || params.BeforeImage.IsZero() { - return true - } - return info.Created.Before(params.BeforeImage) - } - case "since-image": - return func(image *storage.Image, info *types.ImageInspectInfo) bool { - if params == nil || params.SinceImage.IsZero() { - return true - } - return info.Created.After(params.SinceImage) - } - case "dangling": - return func(image *storage.Image, info *types.ImageInspectInfo) bool { - if params == nil || params.Dangling == "" { - return true - } - if common.IsFalse(params.Dangling) && params.ImageName != "" { - return true - } - if common.IsTrue(params.Dangling) && params.ImageName == "" { - return true - } - return false - } - case "reference": - return func(image *storage.Image, info *types.ImageInspectInfo) bool { - if params == nil || params.ReferencePattern == "" { - return true - } - return libpod.MatchesReference(params.ImageName, params.ReferencePattern) - } - case "image-input": - return func(image *storage.Image, info *types.ImageInspectInfo) bool { - if params == nil || params.ImageInput == "" { 
- return true - } - return libpod.MatchesReference(params.ImageName, params.ImageInput) - } - default: - fmt.Println("invalid filter type", filterType) - return nil - } -} diff --git a/cmd/kpod/info.go b/cmd/kpod/info.go deleted file mode 100644 index 22ca74c7..00000000 --- a/cmd/kpod/info.go +++ /dev/null @@ -1,200 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "runtime" - - "github.com/docker/docker/pkg/system" - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - infoDescription = "display system information" - infoCommand = cli.Command{ - Name: "info", - Usage: infoDescription, - Description: `Information displayed here pertains to the host, current storage stats, and the build of kpod. Useful for the user and when reporting issues.`, - Flags: infoFlags, - Action: infoCmd, - ArgsUsage: "", - } - infoFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "debug, D", - Usage: "display additional debug information", - }, - cli.StringFlag{ - Name: "format", - Usage: "Change the output format to JSON or a Go template", - }, - } -) - -func infoCmd(c *cli.Context) error { - if err := validateFlags(c, infoFlags); err != nil { - return err - } - info := map[string]interface{}{} - - infoGivers := []infoGiverFunc{ - storeInfo, - hostInfo, - } - - if c.Bool("debug") { - infoGivers = append(infoGivers, debugInfo) - } - - for _, giver := range infoGivers { - thisName, thisInfo, err := giver(c) - if err != nil { - info[thisName] = infoErr(err) - continue - } - info[thisName] = thisInfo - } - - var out formats.Writer - infoOutputFormat := c.String("format") - switch infoOutputFormat { - case formats.JSONString: - out = formats.JSONStruct{Output: info} - case "": - out = formats.YAMLStruct{Output: info} - default: - out = formats.StdoutTemplate{Output: info, Template: infoOutputFormat} - } - - formats.Writer(out).Out() - - return nil -} - -func infoErr(err error) map[string]interface{} { - return map[string]interface{}{ - "error": err.Error(), - } -} - -type infoGiverFunc func(c *cli.Context) (name string, info map[string]interface{}, err error) - -// top-level "debug" info -func debugInfo(c *cli.Context) (string, map[string]interface{}, error) { - info := map[string]interface{}{} - info["compiler"] = runtime.Compiler - info["go version"] = runtime.Version() - info["kpod version"] = c.App.Version - info["git commit"] = gitCommit - return "debug", info, nil -} - -// top-level "host" info -func hostInfo(c *cli.Context) (string, map[string]interface{}, error) { - // lets say OS, arch, number of cpus, amount of memory, maybe os distribution/version, hostname, kernel version, uptime - info := map[string]interface{}{} - info["os"] = runtime.GOOS - info["arch"] = runtime.GOARCH - info["cpus"] = runtime.NumCPU() - mi, err := system.ReadMemInfo() - if err != nil { - info["meminfo"] = infoErr(err) - } else { - // TODO this might be a place for github.com/dustin/go-humanize - info["MemTotal"] = mi.MemTotal - info["MemFree"] = mi.MemFree - info["SwapTotal"] = mi.SwapTotal - info["SwapFree"] = mi.SwapFree - } - if kv, err := readKernelVersion(); err != nil { - info["kernel"] = infoErr(err) - } else { - info["kernel"] = kv - } - - if up, err := readUptime(); err != nil { - info["uptime"] = infoErr(err) - } else { - info["uptime"] = up - } - if host, err := os.Hostname(); err != nil { - info["hostname"] = infoErr(err) - } else { - info["hostname"] = host - } - return "host", info, nil -} - -// top-level "store" info -func storeInfo(c
*cli.Context) (string, map[string]interface{}, error) { - storeStr := "store" - config, err := getConfig(c) - if err != nil { - return storeStr, nil, errors.Wrapf(err, "Could not get config") - } - store, err := getStore(config) - if err != nil { - return storeStr, nil, err - } - - // lets say storage driver in use, number of images, number of containers - info := map[string]interface{}{} - info["GraphRoot"] = store.GraphRoot() - info["RunRoot"] = store.RunRoot() - info["GraphDriverName"] = store.GraphDriverName() - info["GraphOptions"] = store.GraphOptions() - statusPairs, err := store.Status() - if err != nil { - return storeStr, nil, err - } - status := map[string]string{} - for _, pair := range statusPairs { - status[pair[0]] = pair[1] - } - info["GraphStatus"] = status - images, err := store.Images() - if err != nil { - info["ImageStore"] = infoErr(err) - } else { - info["ImageStore"] = map[string]interface{}{ - "number": len(images), - } - } - containers, err := store.Containers() - if err != nil { - info["ContainerStore"] = infoErr(err) - } else { - info["ContainerStore"] = map[string]interface{}{ - "number": len(containers), - } - } - return storeStr, info, nil -} - -func readKernelVersion() (string, error) { - buf, err := ioutil.ReadFile("/proc/version") - if err != nil { - return "", err - } - f := bytes.Fields(buf) - if len(f) < 2 { - return string(bytes.TrimSpace(buf)), nil - } - return string(f[2]), nil -} - -func readUptime() (string, error) { - buf, err := ioutil.ReadFile("/proc/uptime") - if err != nil { - return "", err - } - f := bytes.Fields(buf) - if len(f) < 1 { - return "", fmt.Errorf("invalid uptime") - } - return string(f[0]), nil -} diff --git a/cmd/kpod/inspect.go b/cmd/kpod/inspect.go deleted file mode 100644 index 45e9d7e1..00000000 --- a/cmd/kpod/inspect.go +++ /dev/null @@ -1,120 +0,0 @@ -package main - -import ( - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/libpod/images" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -const ( - inspectTypeContainer = "container" - inspectTypeImage = "image" - inspectAll = "all" -) - -var ( - inspectFlags = []cli.Flag{ - cli.StringFlag{ - Name: "type, t", - Value: inspectAll, - Usage: "Return JSON for specified type, (e.g image, container or task)", - }, - cli.StringFlag{ - Name: "format, f", - Usage: "Change the output format to a Go template", - }, - cli.BoolFlag{ - Name: "size", - Usage: "Display total file size if the type is container", - }, - } - inspectDescription = "This displays the low-level information on containers and images identified by name or ID. By default, this will render all results in a JSON array. If the container and image have the same name, this will return container JSON for unspecified type." 
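The inspect description above promises a fallback: when no `--type` is given and a name matches both a container and an image, the container wins. A compact sketch of that lookup order, with `getContainerJSON` and `getImageJSON` as hypothetical stand-ins for the real `server.GetContainerData` and `images.GetData` calls:

```go
// Sketch of inspect's container-then-image fallback; the lookup
// functions here are hypothetical stand-ins, not the real API.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func getContainerJSON(name string) (string, error) {
	return "", errNotFound // pretend no container carries this name
}

func getImageJSON(name string) (string, error) {
	return fmt.Sprintf(`{"image": %q}`, name), nil
}

// inspectAny prefers the container when both a container and an image
// carry the same name, mirroring the documented behavior.
func inspectAny(name string) (string, error) {
	if data, err := getContainerJSON(name); err == nil {
		return data, nil
	}
	data, err := getImageJSON(name)
	if err != nil {
		return "", fmt.Errorf("no container or image named %q", name)
	}
	return data, nil
}

func main() {
	out, err := inspectAny("alpine")
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```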
- inspectCommand = cli.Command{ - Name: "inspect", - Usage: "Displays the configuration of a container or image", - Description: inspectDescription, - Flags: inspectFlags, - Action: inspectCmd, - ArgsUsage: "CONTAINER-OR-IMAGE", - } -) - -func inspectCmd(c *cli.Context) error { - args := c.Args() - if len(args) == 0 { - return errors.Errorf("container or image name must be specified: kpod inspect [options [...]] name") - } - if len(args) > 1 { - return errors.Errorf("too many arguments specified") - } - if err := validateFlags(c, inspectFlags); err != nil { - return err - } - - itemType := c.String("type") - size := c.Bool("size") - - switch itemType { - case inspectTypeContainer: - case inspectTypeImage: - case inspectAll: - default: - return errors.Errorf("the only recognized types are %q, %q, and %q", inspectTypeContainer, inspectTypeImage, inspectAll) - } - - name := args[0] - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not get container server") - } - defer server.Shutdown() - if err = server.Update(); err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - - outputFormat := c.String("format") - var data interface{} - switch itemType { - case inspectTypeContainer: - data, err = server.GetContainerData(name, size) - if err != nil { - return errors.Wrapf(err, "error parsing container data") - } - case inspectTypeImage: - data, err = images.GetData(server.Store(), name) - if err != nil { - return errors.Wrapf(err, "error parsing image data") - } - case inspectAll: - ctrData, err := server.GetContainerData(name, size) - if err != nil { - imgData, err := images.GetData(server.Store(), name) - if err != nil { - return errors.Wrapf(err, "error parsing container or image data") - } - data = imgData - - } else { - data = ctrData - } - } - - var out formats.Writer - if outputFormat != "" && outputFormat != formats.JSONString { - //template - out = formats.StdoutTemplate{Output: data, Template: outputFormat} - } else { - // default is json output - out = formats.JSONStruct{Output: data} - } - - formats.Writer(out).Out() - return nil -} diff --git a/cmd/kpod/kill.go b/cmd/kpod/kill.go deleted file mode 100644 index 8a550003..00000000 --- a/cmd/kpod/kill.go +++ /dev/null @@ -1,74 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/docker/docker/pkg/signal" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - killFlags = []cli.Flag{ - cli.StringFlag{ - Name: "signal, s", - Usage: "Signal to send to the container", - Value: "KILL", - }, - } - killDescription = "The main process inside each container specified will be sent SIGKILL, or any signal specified with option --signal." 
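kill resolves the `--signal` value before touching any container, accepting either a number ("9") or a name ("KILL"/"SIGKILL"). A self-contained sketch of that resolution; the abbreviated name table is a hypothetical stand-in for docker's `signal.ParseSignal`:

```go
// Sketch of the signal-name handling behind `kpod kill --signal`,
// using an abbreviated (hypothetical) name table.
package main

import (
	"fmt"
	"strconv"
	"strings"
	"syscall"
)

var signalMap = map[string]syscall.Signal{ // abbreviated
	"HUP": syscall.SIGHUP, "INT": syscall.SIGINT,
	"KILL": syscall.SIGKILL, "TERM": syscall.SIGTERM,
}

func parseSignal(raw string) (syscall.Signal, error) {
	// numeric form: "9"
	if n, err := strconv.Atoi(raw); err == nil {
		if n <= 0 {
			return -1, fmt.Errorf("invalid signal: %s", raw)
		}
		return syscall.Signal(n), nil
	}
	// name form: "KILL" or "SIGKILL", case-insensitive
	sig, ok := signalMap[strings.TrimPrefix(strings.ToUpper(raw), "SIG")]
	if !ok {
		return -1, fmt.Errorf("invalid signal: %s", raw)
	}
	return sig, nil
}

func main() {
	sig, err := parseSignal("KILL")
	fmt.Println(sig, err) // 9 <nil>
}
```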
- killCommand = cli.Command{ - Name: "kill", - Usage: "Kill one or more running containers with a specific signal", - Description: killDescription, - Flags: killFlags, - Action: killCmd, - ArgsUsage: "[CONTAINER_NAME_OR_ID]", - } -) - -// killCmd kills one or more containers with a signal -func killCmd(c *cli.Context) error { - args := c.Args() - if len(args) == 0 { - return errors.Errorf("specify one or more containers to kill") - } - if err := validateFlags(c, killFlags); err != nil { - return err - } - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not get container server") - } - killSignal := c.String("signal") - // Check if the signalString provided by the user is valid - // Invalid signals will return err - sysSignal, err := signal.ParseSignal(killSignal) - if err != nil { - return err - } - defer server.Shutdown() - err = server.Update() - if err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - var lastError error - for _, container := range c.Args() { - id, err := server.ContainerKill(container, sysSignal) - if err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "unable to kill %v", container) - } else { - fmt.Println(id) - } - } - return lastError -} diff --git a/cmd/kpod/load.go b/cmd/kpod/load.go deleted file mode 100644 index e3920805..00000000 --- a/cmd/kpod/load.go +++ /dev/null @@ -1,107 +0,0 @@ -package main - -import ( - "io" - "io/ioutil" - "os" - - "github.com/kubernetes-incubator/cri-o/libpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - loadFlags = []cli.Flag{ - cli.StringFlag{ - Name: "input, i", - Usage: "Read from archive file, default is STDIN", - Value: "/dev/stdin", - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "Suppress the output", - }, - } - loadDescription = "Loads the image from docker-archive stored on the local machine." - loadCommand = cli.Command{ - Name: "load", - Usage: "load an image from docker archive", - Description: loadDescription, - Flags: loadFlags, - Action: loadCmd, - ArgsUsage: "", - } -) - -// loadCmd gets the image/file to be loaded from the command line -// and calls loadImage to load the image to containers-storage -func loadCmd(c *cli.Context) error { - - args := c.Args() - var image string - if len(args) == 1 { - image = args[0] - } - if len(args) > 1 { - return errors.New("too many arguments. 
Requires exactly 1") - } - if err := validateFlags(c, loadFlags); err != nil { - return err - } - - runtime, err := getRuntime(c) - if err != nil { - return errors.Wrapf(err, "could not get runtime") - } - defer runtime.Shutdown(false) - - input := c.String("input") - - if input == "/dev/stdin" { - fi, err := os.Stdin.Stat() - if err != nil { - return err - } - // checking if loading from pipe - if !fi.Mode().IsRegular() { - outFile, err := ioutil.TempFile("/var/tmp", "kpod") - if err != nil { - return errors.Errorf("error creating file %v", err) - } - defer outFile.Close() - defer os.Remove(outFile.Name()) - - inFile, err := os.OpenFile(input, 0, 0666) - if err != nil { - return errors.Errorf("error reading file %v", err) - } - defer inFile.Close() - - _, err = io.Copy(outFile, inFile) - if err != nil { - return errors.Errorf("error copying file %v", err) - } - - input = outFile.Name() - } - } - - var output io.Writer - if !c.Bool("quiet") { - output = os.Stdout - } - - src := libpod.DockerArchive + ":" + input - if err := runtime.PullImage(src, false, "", output); err != nil { - src = libpod.OCIArchive + ":" + input - // generate full src name with specified image:tag - if image != "" { - src = src + ":" + image - } - if err := runtime.PullImage(src, false, "", output); err != nil { - return errors.Wrapf(err, "error pulling %q", src) - } - } - - return nil -} diff --git a/cmd/kpod/login.go b/cmd/kpod/login.go deleted file mode 100644 index 17880f7a..00000000 --- a/cmd/kpod/login.go +++ /dev/null @@ -1,110 +0,0 @@ -package main - -import ( - "bufio" - "context" - "fmt" - "os" - "strings" - - "github.com/containers/image/docker" - "github.com/containers/image/pkg/docker/config" - "github.com/kubernetes-incubator/cri-o/libpod/common" - "github.com/pkg/errors" - "github.com/urfave/cli" - "golang.org/x/crypto/ssh/terminal" -) - -var ( - loginFlags = []cli.Flag{ - cli.StringFlag{ - Name: "password, p", - Usage: "Password for registry", - }, - cli.StringFlag{ - Name: "username, u", - Usage: "Username for registry", - }, - cli.StringFlag{ - Name: "authfile", - Usage: "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json", - }, - } - loginDescription = "Login to a container registry on a specified server." 
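Before hitting the registry, login needs a username and password; the deleted `getUserAndPass` further down reads them interactively. A trimmed standalone sketch of the same prompt flow, assuming the `x/crypto/ssh/terminal` dependency the file already uses; defaulting to the auth-file user on empty input is an added assumption, not original behavior:

```go
// Sketch of an interactive credential prompt in the style of
// `kpod login`; echo is disabled while the password is typed.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"

	"golang.org/x/crypto/ssh/terminal"
)

func promptCredentials(defaultUser string) (string, string, error) {
	reader := bufio.NewReader(os.Stdin)
	if defaultUser != "" {
		fmt.Printf("Username (%s): ", defaultUser)
	} else {
		fmt.Print("Username: ")
	}
	user, err := reader.ReadString('\n')
	if err != nil {
		return "", "", err
	}
	user = strings.TrimSpace(user)
	if user == "" {
		user = defaultUser // assumed fallback to the logged-in user
	}
	fmt.Print("Password: ")
	pass, err := terminal.ReadPassword(0) // fd 0 = stdin, echo off
	fmt.Println()
	if err != nil {
		return "", "", err
	}
	return user, string(pass), nil
}

func main() {
	u, _, err := promptCredentials("jdoe")
	if err != nil {
		os.Exit(1)
	}
	fmt.Println("would authenticate as", u)
}
```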
- loginCommand = cli.Command{ - Name: "login", - Usage: "login to a container registry", - Description: loginDescription, - Flags: loginFlags, - Action: loginCmd, - ArgsUsage: "REGISTRY", - } -) - -// loginCmd uses the authentication package to store a user's authenticated credentials -// in an auth.json file for future use -func loginCmd(c *cli.Context) error { - args := c.Args() - if len(args) > 1 { - return errors.Errorf("too many arguments, login takes only 1 argument") - } - if len(args) == 0 { - return errors.Errorf("registry must be given") - } - var server string - if len(args) == 1 { - server = args[0] - } - - sc := common.GetSystemContext("", c.String("authfile")) - - // username of user logged in to server (if one exists) - userFromAuthFile := config.GetUserLoggedIn(sc, server) - username, password, err := getUserAndPass(c.String("username"), c.String("password"), userFromAuthFile) - if err != nil { - return errors.Wrapf(err, "error getting username and password") - } - - if err = docker.CheckAuth(context.TODO(), sc, username, password, server); err == nil { - if err := config.SetAuthentication(sc, server, username, password); err != nil { - return err - } - } - switch err { - case nil: - fmt.Println("Login Succeeded!") - return nil - case docker.ErrUnauthorizedForCredentials: - return errors.Errorf("error logging into %q: invalid username/password\n", server) - default: - return errors.Wrapf(err, "error authenticating creds for %q", server) - } -} - -// getUserAndPass gets the username and password from STDIN if not given -// using the -u and -p flags -func getUserAndPass(username, password, userFromAuthFile string) (string, string, error) { - var err error - reader := bufio.NewReader(os.Stdin) - if username == "" { - if userFromAuthFile != "" { - fmt.Printf("Username (%s): ", userFromAuthFile) - } else { - fmt.Print("Username: ") - } - username, err = reader.ReadString('\n') - if err != nil { - return "", "", errors.Wrapf(err, "error reading username") - } - } - if password == "" { - fmt.Print("Password: ") - pass, err := terminal.ReadPassword(0) - if err != nil { - return "", "", errors.Wrapf(err, "error reading password") - } - password = string(pass) - fmt.Println() - } - return strings.TrimSpace(username), password, err -} diff --git a/cmd/kpod/logout.go b/cmd/kpod/logout.go deleted file mode 100644 index 9438b81a..00000000 --- a/cmd/kpod/logout.go +++ /dev/null @@ -1,66 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/containers/image/pkg/docker/config" - "github.com/kubernetes-incubator/cri-o/libpod/common" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - logoutFlags = []cli.Flag{ - cli.StringFlag{ - Name: "authfile", - Usage: "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json", - }, - cli.BoolFlag{ - Name: "all, a", - Usage: "Remove the cached credentials for all registries in the auth file", - }, - } - logoutDescription = "Remove the cached username and password for the registry." 
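Both login and logout document the default `--authfile` as `${XDG_RUNTIME_DIR}/containers/auth.json`. A small sketch of how that default can be resolved; the non-XDG fallback below is an assumption for illustration, not what `libpod/common` actually does:

```go
// Resolve the default auth-file path described in the flag help text.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func defaultAuthFile() string {
	if dir := os.Getenv("XDG_RUNTIME_DIR"); dir != "" {
		return filepath.Join(dir, "containers", "auth.json")
	}
	// Assumed fallback for sessions without a runtime directory.
	return filepath.Join(os.TempDir(), "containers", "auth.json")
}

func main() {
	fmt.Println("auth file:", defaultAuthFile())
}
```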
- logoutCommand = cli.Command{ - Name: "logout", - Usage: "logout of a container registry", - Description: logoutDescription, - Flags: logoutFlags, - Action: logoutCmd, - ArgsUsage: "REGISTRY", - } -) - -// logoutCmd uses the authentication package to remove the authenticated of a registry -// stored in the auth.json file -func logoutCmd(c *cli.Context) error { - args := c.Args() - if len(args) > 1 { - return errors.Errorf("too many arguments, logout takes only 1 argument") - } - var server string - if len(args) == 1 { - server = args[0] - } - - sc := common.GetSystemContext("", c.String("authfile")) - - if c.Bool("all") { - if err := config.RemoveAllAuthentication(sc); err != nil { - return err - } - fmt.Println("Remove login credentials for all registries") - return nil - } - - err := config.RemoveAuthentication(sc, server) - switch err { - case nil: - fmt.Printf("Remove login credentials for %s\n", server) - return nil - case config.ErrNotLoggedIn: - return errors.Errorf("Not logged into %s\n", server) - default: - return errors.Wrapf(err, "error logging out of %q", server) - } -} diff --git a/cmd/kpod/logs.go b/cmd/kpod/logs.go deleted file mode 100644 index 60be4792..00000000 --- a/cmd/kpod/logs.go +++ /dev/null @@ -1,92 +0,0 @@ -package main - -import ( - "fmt" - "time" - - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - logsFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "details", - Usage: "Show extra details provided to the logs", - Hidden: true, - }, - cli.BoolFlag{ - Name: "follow, f", - Usage: "Follow log output. The default is false", - }, - cli.StringFlag{ - Name: "since", - Usage: "Show logs since TIMESTAMP", - }, - cli.Uint64Flag{ - Name: "tail", - Usage: "Output the specified number of LINES at the end of the logs. Defaults to 0, which prints all lines", - }, - } - logsDescription = "The kpod logs command batch-retrieves whatever logs are present for a container at the time of execution. This does not guarantee execution" + - "order when combined with kpod run (i.e. 
your run may not have generated any logs at the time you execute kpod logs" - logsCommand = cli.Command{ - Name: "logs", - Usage: "Fetch the logs of a container", - Description: logsDescription, - Flags: logsFlags, - Action: logsCmd, - ArgsUsage: "CONTAINER", - } -) - -func logsCmd(c *cli.Context) error { - args := c.Args() - if len(args) != 1 { - return errors.Errorf("'kpod logs' requires exactly one container name/ID") - } - if err := validateFlags(c, logsFlags); err != nil { - return err - } - container := c.Args().First() - var opts libkpod.LogOptions - opts.Details = c.Bool("details") - opts.Follow = c.Bool("follow") - opts.SinceTime = time.Time{} - if c.IsSet("since") { - // parse time, error out if something is wrong - since, err := time.Parse("2006-01-02T15:04:05.999999999-07:00", c.String("since")) - if err != nil { - return errors.Wrapf(err, "could not parse time: %q", c.String("since")) - } - opts.SinceTime = since - } - opts.Tail = c.Uint64("tail") - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not create container server") - } - defer server.Shutdown() - err = server.Update() - if err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - logs := make(chan string) - go func() { - err = server.GetLogs(container, logs, opts) - }() - printLogs(logs) - return err -} - -func printLogs(logs chan string) { - for line := range logs { - fmt.Println(line) - } -} diff --git a/cmd/kpod/main.go b/cmd/kpod/main.go deleted file mode 100644 index 7745fbf3..00000000 --- a/cmd/kpod/main.go +++ /dev/null @@ -1,129 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/containers/storage/pkg/reexec" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -// This is populated by the Makefile from the VERSION file -// in the repository -var kpodVersion = "" - -func main() { - debug := false - - if reexec.Init() { - return - } - - app := cli.NewApp() - app.Name = "kpod" - app.Usage = "manage pods and images" - - var v string - if kpodVersion != "" { - v = kpodVersion - } - app.Version = v - - app.Commands = []cli.Command{ - diffCommand, - exportCommand, - historyCommand, - imagesCommand, - infoCommand, - inspectCommand, - killCommand, - loadCommand, - loginCommand, - logoutCommand, - logsCommand, - mountCommand, - pauseCommand, - psCommand, - pullCommand, - pushCommand, - renameCommand, - rmCommand, - rmiCommand, - saveCommand, - statsCommand, - stopCommand, - tagCommand, - umountCommand, - unpauseCommand, - versionCommand, - waitCommand, - } - app.Before = func(c *cli.Context) error { - logLevel := c.GlobalString("log-level") - if logLevel != "" { - level, err := logrus.ParseLevel(logLevel) - if err != nil { - return err - } - - logrus.SetLevel(level) - } - - if logLevel == "debug" { - debug = true - - } - - return nil - } - app.After = func(*cli.Context) error { - // called by Run() when the command handler succeeds - shutdownStores() - return nil - } - cli.OsExiter = func(code int) { - // called by Run() when the command fails, bypassing After() - shutdownStores() - os.Exit(code) - } - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "config, c", - Usage: "path of a config file detailing container server configuration options", - }, - cli.StringFlag{ - Name: "log-level", - Usage: "log messages above specified level: debug, info, warn, error (default), fatal or panic", - Value: "error", - }, - cli.StringFlag{ 
- Name: "root", - Usage: "path to the root directory in which data, including images, is stored", - }, - cli.StringFlag{ - Name: "runroot", - Usage: "path to the 'run directory' where all state information is stored", - }, - cli.StringFlag{ - Name: "runtime", - Usage: "path to the OCI-compatible binary used to run containers, default is /usr/bin/runc", - }, - cli.StringFlag{ - Name: "storage-driver, s", - Usage: "select which storage driver is used to manage storage of images and containers (default is overlay)", - }, - cli.StringSliceFlag{ - Name: "storage-opt", - Usage: "used to pass an option to the storage driver", - }, - } - if err := app.Run(os.Args); err != nil { - if debug { - logrus.Errorf(err.Error()) - } else { - fmt.Fprintln(os.Stderr, err.Error()) - } - cli.OsExiter(1) - } -} diff --git a/cmd/kpod/mount.go b/cmd/kpod/mount.go deleted file mode 100644 index a711bede..00000000 --- a/cmd/kpod/mount.go +++ /dev/null @@ -1,121 +0,0 @@ -package main - -import ( - js "encoding/json" - "fmt" - - of "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - mountDescription = ` - kpod mount - Lists all mounted containers mount points - - kpod mount CONTAINER-NAME-OR-ID - Mounts the specified container and outputs the mountpoint -` - - mountFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "notruncate", - Usage: "do not truncate output", - }, - cli.StringFlag{ - Name: "label", - Usage: "SELinux label for the mount point", - }, - cli.StringFlag{ - Name: "format", - Usage: "Change the output format to Go template", - }, - } - mountCommand = cli.Command{ - Name: "mount", - Usage: "Mount a working container's root filesystem", - Description: mountDescription, - Action: mountCmd, - ArgsUsage: "[CONTAINER-NAME-OR-ID]", - Flags: mountFlags, - } -) - -// MountOutputParams stores info about each layer -type jsonMountPoint struct { - ID string `json:"id"` - Names []string `json:"names"` - MountPoint string `json:"mountpoint"` -} - -func mountCmd(c *cli.Context) error { - formats := map[string]bool{ - "": true, - of.JSONString: true, - } - - args := c.Args() - json := c.String("format") == of.JSONString - if !formats[c.String("format")] { - return errors.Errorf("%q is not a supported format", c.String("format")) - } - - if len(args) > 1 { - return errors.Errorf("too many arguments specified") - } - if err := validateFlags(c, mountFlags); err != nil { - return err - } - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - store, err := getStore(config) - if err != nil { - return errors.Wrapf(err, "error getting store") - } - if len(args) == 1 { - if json { - return errors.Wrapf(err, "json option can not be used with a container id") - } - mountPoint, err := store.Mount(args[0], c.String("label")) - if err != nil { - return errors.Wrapf(err, "error finding container %q", args[0]) - } - fmt.Printf("%s\n", mountPoint) - } else { - jsonMountPoints := []jsonMountPoint{} - containers, err2 := store.Containers() - if err2 != nil { - return errors.Wrapf(err2, "error reading list of all containers") - } - for _, container := range containers { - layer, err := store.Layer(container.LayerID) - if err != nil { - return errors.Wrapf(err, "error finding layer %q for container %q", container.LayerID, container.ID) - } - if layer.MountPoint == "" { - continue - } - if json { - jsonMountPoints = append(jsonMountPoints, jsonMountPoint{ID: container.ID, Names: container.Names, MountPoint: layer.MountPoint}) - 
continue - } - - if c.Bool("notruncate") { - fmt.Printf("%-64s %s\n", container.ID, layer.MountPoint) - } else { - fmt.Printf("%-12.12s %s\n", container.ID, layer.MountPoint) - } - } - if json { - data, err := js.MarshalIndent(jsonMountPoints, "", " ") - if err != nil { - return err - } - fmt.Printf("%s\n", data) - } - } - return nil -} diff --git a/cmd/kpod/pause.go b/cmd/kpod/pause.go deleted file mode 100644 index 5a8229eb..00000000 --- a/cmd/kpod/pause.go +++ /dev/null @@ -1,58 +0,0 @@ -package main - -import ( - "fmt" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" - "os" -) - -var ( - pauseDescription = ` - kpod pause - - Pauses one or more running containers. The container name or ID can be used. -` - pauseCommand = cli.Command{ - Name: "pause", - Usage: "Pauses all the processes in one or more containers", - Description: pauseDescription, - Action: pauseCmd, - ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]", - } -) - -func pauseCmd(c *cli.Context) error { - args := c.Args() - if len(args) < 1 { - return errors.Errorf("you must provide at least one container name or id") - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not get container server") - } - defer server.Shutdown() - if err := server.Update(); err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - var lastError error - for _, container := range c.Args() { - cid, err := server.ContainerPause(container) - if err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to pause container %v", container) - } else { - fmt.Println(cid) - } - } - - return lastError -} diff --git a/cmd/kpod/ps.go b/cmd/kpod/ps.go deleted file mode 100644 index 11dcae5e..00000000 --- a/cmd/kpod/ps.go +++ /dev/null @@ -1,663 +0,0 @@ -package main - -import ( - "os" - "path/filepath" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/docker/go-units" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" - - "k8s.io/apimachinery/pkg/fields" - - "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/oci" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -type psOptions struct { - all bool - filter string - format string - last int - latest bool - noTrunc bool - quiet bool - size bool - label string - namespace bool -} - -type psTemplateParams struct { - ID string - Image string - Command string - CreatedAt string - RunningFor string - Status string - Ports string - Size string - Names string - Labels string - Mounts string - PID int - Cgroup string - IPC string - MNT string - NET string - PIDNS string - User string - UTS string -} - -// psJSONParams is only used when the JSON format is specified, -// and is better for data processing from JSON. -// psJSONParams will be populated by data from libkpod.ContainerData, -// the members of the struct are the sama data types as their sources. 
-type psJSONParams struct { - ID string `json:"id"` - Image string `json:"image"` - ImageID string `json:"image_id"` - Command string `json:"command"` - CreatedAt time.Time `json:"createdAt"` - RunningFor time.Duration `json:"runningFor"` - Status string `json:"status"` - Ports map[string]struct{} `json:"ports"` - Size uint `json:"size"` - Names string `json:"names"` - Labels fields.Set `json:"labels"` - Mounts []specs.Mount `json:"mounts"` - ContainerRunning bool `json:"ctrRunning"` - Namespaces *namespace `json:"namespace,omitempty"` -} - -type namespace struct { - PID string `json:"pid,omitempty"` - Cgroup string `json:"cgroup,omitempty"` - IPC string `json:"ipc,omitempty"` - MNT string `json:"mnt,omitempty"` - NET string `json:"net,omitempty"` - PIDNS string `json:"pidns,omitempty"` - User string `json:"user,omitempty"` - UTS string `json:"uts,omitempty"` -} - -var ( - psFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "all, a", - Usage: "Show all the containers, default is only running containers", - }, - cli.StringFlag{ - Name: "filter, f", - Usage: "Filter output based on conditions given", - }, - cli.StringFlag{ - Name: "format", - Usage: "Pretty-print containers to JSON or using a Go template", - }, - cli.IntFlag{ - Name: "last, n", - Usage: "Print the n last created containers (all states)", - Value: -1, - }, - cli.BoolFlag{ - Name: "latest, l", - Usage: "Show the latest container created (all states)", - }, - cli.BoolFlag{ - Name: "no-trunc", - Usage: "Display the extended information", - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "Print the numeric IDs of the containers only", - }, - cli.BoolFlag{ - Name: "size, s", - Usage: "Display the total file sizes", - }, - cli.BoolFlag{ - Name: "namespace, ns", - Usage: "Display namespace information", - }, - } - psDescription = "Prints out information about the containers" - psCommand = cli.Command{ - Name: "ps", - Usage: "List containers", - Description: psDescription, - Flags: psFlags, - Action: psCmd, - ArgsUsage: "", - } -) - -func psCmd(c *cli.Context) error { - if err := validateFlags(c, psFlags); err != nil { - return err - } - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "error creating server") - } - if err := server.Update(); err != nil { - return errors.Wrapf(err, "error updating list of containers") - } - - if len(c.Args()) > 0 { - return errors.Errorf("too many arguments, ps takes no arguments") - } - - format := genPsFormat(c.Bool("quiet"), c.Bool("size"), c.Bool("namespace")) - if c.IsSet("format") { - format = c.String("format") - } - - opts := psOptions{ - all: c.Bool("all"), - filter: c.String("filter"), - format: format, - last: c.Int("last"), - latest: c.Bool("latest"), - noTrunc: c.Bool("no-trunc"), - quiet: c.Bool("quiet"), - size: c.Bool("size"), - namespace: c.Bool("namespace"), - } - - // all, latest, and last are mutually exclusive. 
Only one flag can be used at a time - exclusiveOpts := 0 - if opts.last >= 0 { - exclusiveOpts++ - } - if opts.latest { - exclusiveOpts++ - } - if opts.all { - exclusiveOpts++ - } - if exclusiveOpts > 1 { - return errors.Errorf("Last, latest and all are mutually exclusive") - } - - containers, err := server.ListContainers() - if err != nil { - return errors.Wrapf(err, "error getting containers from server") - } - var params *FilterParamsPS - if opts.filter != "" { - params, err = parseFilter(opts.filter, containers) - if err != nil { - return errors.Wrapf(err, "error parsing filter") - } - } else { - params = nil - } - - containerList := getContainersMatchingFilter(containers, params, server) - - return generatePsOutput(containerList, server, opts) -} - -// generate the template based on conditions given -func genPsFormat(quiet, size, namespace bool) (format string) { - if quiet { - return formats.IDString - } - if namespace { - format = "table {{.ID}}\t{{.Names}}\t{{.PID}}\t{{.Cgroup}}\t{{.IPC}}\t{{.MNT}}\t{{.NET}}\t{{.PIDNS}}\t{{.User}}\t{{.UTS}}\t" - return - } - format = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}\t{{.Ports}}\t{{.Names}}\t" - if size { - format += "{{.Size}}\t" - } - return -} - -func psToGeneric(templParams []psTemplateParams, JSONParams []psJSONParams) (genericParams []interface{}) { - if len(templParams) > 0 { - for _, v := range templParams { - genericParams = append(genericParams, interface{}(v)) - } - return - } - for _, v := range JSONParams { - genericParams = append(genericParams, interface{}(v)) - } - return -} - -// generate the accurate header based on template given -func (p *psTemplateParams) headerMap() map[string]string { - v := reflect.Indirect(reflect.ValueOf(p)) - values := make(map[string]string) - - for i := 0; i < v.NumField(); i++ { - key := v.Type().Field(i).Name - value := key - if value == "ID" { - value = "Container" + value - } - values[key] = strings.ToUpper(splitCamelCase(value)) - } - return values -} - -// getContainers gets the containers that match the flags given -func getContainers(containers []*libkpod.ContainerData, opts psOptions) []*libkpod.ContainerData { - var containersOutput []*libkpod.ContainerData - if opts.last >= 0 && opts.last < len(containers) { - for i := 0; i < opts.last; i++ { - containersOutput = append(containersOutput, containers[i]) - } - return containersOutput - } - if opts.latest { - return []*libkpod.ContainerData{containers[0]} - } - if opts.all || opts.last >= len(containers) { - return containers - } - for _, ctr := range containers { - if ctr.State.Status == oci.ContainerStateRunning { - containersOutput = append(containersOutput, ctr) - } - } - return containersOutput -} - -// getTemplateOutput returns the modified container information -func getTemplateOutput(containers []*libkpod.ContainerData, opts psOptions) (psOutput []psTemplateParams) { - var status string - for _, ctr := range containers { - ctrID := ctr.ID - runningFor := units.HumanDuration(time.Since(ctr.State.Created)) - createdAt := runningFor + " ago" - command := getStrFromSquareBrackets(ctr.ImageCreatedBy) - imageName := ctr.FromImage - mounts := getMounts(ctr.Mounts, opts.noTrunc) - ports := getPorts(ctr.Config.ExposedPorts) - size := units.HumanSize(float64(ctr.SizeRootFs)) - labels := getLabels(ctr.Labels) - - ns := getNamespaces(ctr.State.Pid) - - switch ctr.State.Status { - case oci.ContainerStateStopped: - status = "Exited (" + strconv.FormatInt(int64(ctr.State.ExitCode), 10) + ") " + runningFor + " ago" - 
case oci.ContainerStateRunning: - status = "Up " + runningFor + " ago" - case oci.ContainerStatePaused: - status = "Paused" - default: - status = "Created" - } - - if !opts.noTrunc { - ctrID = ctr.ID[:idTruncLength] - imageName = getImageName(ctr.FromImage) - } - - params := psTemplateParams{ - ID: ctrID, - Image: imageName, - Command: command, - CreatedAt: createdAt, - RunningFor: runningFor, - Status: status, - Ports: ports, - Size: size, - Names: ctr.Name, - Labels: labels, - Mounts: mounts, - PID: ctr.State.Pid, - Cgroup: ns.Cgroup, - IPC: ns.IPC, - MNT: ns.MNT, - NET: ns.NET, - PIDNS: ns.PID, - User: ns.User, - UTS: ns.UTS, - } - psOutput = append(psOutput, params) - } - return -} - -func getNamespaces(pid int) *namespace { - ctrPID := strconv.Itoa(pid) - cgroup, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "cgroup")) - ipc, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "ipc")) - mnt, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "mnt")) - net, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "net")) - pidns, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "pid")) - user, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "user")) - uts, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "uts")) - - return &namespace{ - PID: ctrPID, - Cgroup: cgroup, - IPC: ipc, - MNT: mnt, - NET: net, - PIDNS: pidns, - User: user, - UTS: uts, - } -} - -func getNamespaceInfo(path string) (string, error) { - val, err := os.Readlink(path) - if err != nil { - return "", errors.Wrapf(err, "error getting info from %q", path) - } - return getStrFromSquareBrackets(val), nil -} - -// getJSONOutput returns the container info in its raw form -func getJSONOutput(containers []*libkpod.ContainerData, nSpace bool) (psOutput []psJSONParams) { - var ns *namespace - for _, ctr := range containers { - if nSpace { - ns = getNamespaces(ctr.State.Pid) - } - - params := psJSONParams{ - ID: ctr.ID, - Image: ctr.FromImage, - ImageID: ctr.FromImageID, - Command: getStrFromSquareBrackets(ctr.ImageCreatedBy), - CreatedAt: ctr.State.Created, - RunningFor: time.Since(ctr.State.Created), - Status: ctr.State.Status, - Ports: ctr.Config.ExposedPorts, - Size: ctr.SizeRootFs, - Names: ctr.Name, - Labels: ctr.Labels, - Mounts: ctr.Mounts, - ContainerRunning: ctr.State.Status == oci.ContainerStateRunning, - Namespaces: ns, - } - psOutput = append(psOutput, params) - } - return -} - -func generatePsOutput(containers []*libkpod.ContainerData, server *libkpod.ContainerServer, opts psOptions) error { - containersOutput := getContainers(containers, opts) - if len(containersOutput) == 0 { - return nil - } - - var out formats.Writer - - switch opts.format { - case formats.JSONString: - psOutput := getJSONOutput(containersOutput, opts.namespace) - out = formats.JSONStructArray{Output: psToGeneric([]psTemplateParams{}, psOutput)} - default: - psOutput := getTemplateOutput(containersOutput, opts) - out = formats.StdoutTemplateArray{Output: psToGeneric(psOutput, []psJSONParams{}), Template: opts.format, Fields: psOutput[0].headerMap()} - } - - return formats.Writer(out).Out() -} - -// getStrFromSquareBrackets gets the string inside [] from a string -func getStrFromSquareBrackets(cmd string) string { - reg, err := regexp.Compile(".*\\[|\\].*") - if err != nil { - return "" - } - arr := strings.Split(reg.ReplaceAllLiteralString(cmd, ""), ",") - return strings.Join(arr, ",") -} - -// getImageName shortens the image name -func getImageName(img string) string { - arr := 
strings.Split(img, "/")
-	if len(arr) > 2 && arr[0] == "docker.io" && arr[1] == "library" {
-		img = strings.Join(arr[2:], "/")
-	} else if len(arr) > 1 && arr[0] == "docker.io" {
-		img = strings.Join(arr[1:], "/")
-	}
-	return img
-}
-
-// getLabels converts the labels to a string of the form "key=value,key2=value2"
-func getLabels(labels fields.Set) string {
-	var arr []string
-	if len(labels) > 0 {
-		for key, val := range labels {
-			temp := key + "=" + val
-			arr = append(arr, temp)
-		}
-		return strings.Join(arr, ",")
-	}
-	return ""
-}
-
-// getMounts converts the mounted volumes to a string of the form "mount1,mount2";
-// the source paths are truncated unless noTrunc is set
-func getMounts(mounts []specs.Mount, noTrunc bool) string {
-	var arr []string
-	if len(mounts) == 0 {
-		return ""
-	}
-	for _, mount := range mounts {
-		if noTrunc {
-			arr = append(arr, mount.Source)
-			continue
-		}
-		tempArr := strings.SplitAfter(mount.Source, "/")
-		if len(tempArr) >= 3 {
-			arr = append(arr, strings.Join(tempArr[:3], ""))
-		} else {
-			arr = append(arr, mount.Source)
-		}
-	}
-	return strings.Join(arr, ",")
-}
-
-// getPorts converts the ports used to a string of the form "port1,port2"
-func getPorts(ports map[string]struct{}) string {
-	var arr []string
-	if len(ports) == 0 {
-		return ""
-	}
-	for key := range ports {
-		arr = append(arr, key)
-	}
-	return strings.Join(arr, ",")
-}
-
-// FilterParamsPS contains the filter options for ps
-type FilterParamsPS struct {
-	id       string
-	label    string
-	name     string
-	exited   int32
-	status   string
-	ancestor string
-	before   time.Time
-	since    time.Time
-	volume   string
-}
-
-// parseFilter takes a filter string and a list of containers and builds the filter parameters
-func parseFilter(filter string, containers []*oci.Container) (*FilterParamsPS, error) {
-	params := new(FilterParamsPS)
-	allFilters := strings.Split(filter, ",")
-
-	for _, param := range allFilters {
-		pair := strings.SplitN(param, "=", 2)
-		switch strings.TrimSpace(pair[0]) {
-		case "id":
-			params.id = pair[1]
-		case "label":
-			params.label = pair[1]
-		case "name":
-			params.name = pair[1]
-		case "exited":
-			exitedCode, err := strconv.ParseInt(pair[1], 10, 32)
-			if err != nil {
-				return nil, errors.Errorf("exited code out of range %q", pair[1])
-			}
-			params.exited = int32(exitedCode)
-		case "status":
-			params.status = pair[1]
-		case "ancestor":
-			params.ancestor = pair[1]
-		case "before":
-			if ctr, err := findContainer(containers, pair[1]); err == nil {
-				params.before = ctr.CreatedAt()
-			} else {
-				return nil, errors.Wrapf(err, "no such container %q", pair[1])
-			}
-		case "since":
-			if ctr, err := findContainer(containers, pair[1]); err == nil {
-				params.since = ctr.CreatedAt()
-			} else {
-				return nil, errors.Wrapf(err, "no such container %q", pair[1])
-			}
-		case "volume":
-			params.volume = pair[1]
-		default:
-			return nil, errors.Errorf("invalid filter %q", pair[0])
-		}
-	}
-	return params, nil
-}
-
-// findContainer finds a container with a specific name or id from a list of containers
-func findContainer(containers []*oci.Container, ref string) (*oci.Container, error) {
-	for _, ctr := range containers {
-		if strings.HasPrefix(ctr.ID(), ref) || ctr.Name() == ref {
-			return ctr, nil
-		}
-	}
-	return nil, errors.Errorf("could not find container")
-}
-
-// matchesFilter checks if a container matches all the filter parameters
-func matchesFilter(ctrData *libkpod.ContainerData, params *FilterParamsPS) bool {
-	if params == nil {
-		return true
-	}
-	if params.id != "" && !matchesID(ctrData, params.id) {
-		return false
-	}
-	if params.name != "" && !matchesName(ctrData, params.name) {
-		return false
-	}
-	if !params.before.IsZero() && !matchesBeforeContainer(ctrData, params.before) {
-		return false
-	}
-	if !params.since.IsZero() && !matchesSinceContainer(ctrData, params.since) {
-		return false
-	}
-	if params.exited > 0 && !matchesExited(ctrData, params.exited) {
-		return false
-	}
-	if params.status != "" && !matchesStatus(ctrData, params.status) {
-		return false
-	}
-	if params.ancestor != "" && !matchesAncestor(ctrData, params.ancestor) {
-		return false
-	}
-	if params.label != "" && !matchesLabel(ctrData, params.label) {
-		return false
-	}
-	if params.volume != "" && !matchesVolume(ctrData, params.volume) {
-		return false
-	}
-	return true
-}
-
-// getContainersMatchingFilter returns a slice of all the containers that match the provided filter parameters
-func getContainersMatchingFilter(containers []*oci.Container, filter *FilterParamsPS, server *libkpod.ContainerServer) []*libkpod.ContainerData {
-	var filteredCtrs []*libkpod.ContainerData
-	for _, ctr := range containers {
-		ctrData, err := server.GetContainerData(ctr.ID(), true)
-		if err != nil {
-			// skip containers whose data cannot be read rather than
-			// matching against nil data
-			logrus.Warn("unable to get container data for matched container")
-			continue
-		}
-		if filter == nil || matchesFilter(ctrData, filter) {
-			filteredCtrs = append(filteredCtrs, ctrData)
-		}
-	}
-	return filteredCtrs
-}
-
-// matchesID returns true if the IDs match
-func matchesID(ctrData *libkpod.ContainerData, id string) bool {
-	return strings.HasPrefix(ctrData.ID, id)
-}
-
-// matchesBeforeContainer returns true if the container was created before the filter container
-func matchesBeforeContainer(ctrData *libkpod.ContainerData, beforeTime time.Time) bool {
-	return ctrData.State.Created.Before(beforeTime)
-}
-
-// matchesSinceContainer returns true if the container was created since the filter container
-func matchesSinceContainer(ctrData *libkpod.ContainerData, sinceTime time.Time) bool {
-	return ctrData.State.Created.After(sinceTime)
-}
-
-// matchesLabel returns true if the container label matches that of the filter label
-func matchesLabel(ctrData *libkpod.ContainerData, label string) bool {
-	pair := strings.SplitN(label, "=", 2)
-	if val, ok := ctrData.Labels[pair[0]]; ok {
-		if len(pair) == 2 && val == pair[1] {
-			return true
-		}
-		if len(pair) == 1 {
-			return true
-		}
-		return false
-	}
-	return false
-}
-
-// matchesName returns true if the names are identical
-func matchesName(ctrData *libkpod.ContainerData, name string) bool {
-	return ctrData.Name == name
-}
-
-// matchesExited returns true if the exit codes are identical
-func matchesExited(ctrData *libkpod.ContainerData, exited int32) bool {
-	return ctrData.State.ExitCode == exited
-}
-
-// matchesStatus returns true if the container status matches that of filter status
-func matchesStatus(ctrData *libkpod.ContainerData, status string) bool {
-	return ctrData.State.Status == status
-}
-
-// matchesAncestor returns true if filter ancestor is in container image name
-func matchesAncestor(ctrData *libkpod.ContainerData, ancestor string) bool {
-	return strings.Contains(ctrData.FromImage, ancestor)
-}
-
-// matchesVolume returns true if a volume mounted by, or a volume path of, the container matches the filter volume
-func matchesVolume(ctrData *libkpod.ContainerData, volume string) bool {
-	for _, vol := range ctrData.Mounts {
-		if strings.Contains(vol.Source, volume) {
-			return true
-		}
-	}
-	return false
-}
diff --git a/cmd/kpod/pull.go b/cmd/kpod/pull.go
deleted file mode 100644
index 02ce01f0..00000000
--- a/cmd/kpod/pull.go
+++ 
/dev/null @@ -1,163 +0,0 @@ -package main - -import ( - "os" - - "fmt" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/pkg/sysregistries" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -var ( - pullFlags = []cli.Flag{ - cli.BoolFlag{ - // all-tags is hidden since it has not been implemented yet - Name: "all-tags, a", - Hidden: true, - Usage: "Download all tagged images in the repository", - }, - cli.StringFlag{ - Name: "signature-policy", - Usage: "`pathname` of signature policy file (not usually used)", - Hidden: true, - }, - } - - pullDescription = "Pulls an image from a registry and stores it locally.\n" + - "An image can be pulled using its tag or digest. If a tag is not\n" + - "specified, the image with the 'latest' tag (if it exists) is pulled." - pullCommand = cli.Command{ - Name: "pull", - Usage: "pull an image from a registry", - Description: pullDescription, - Flags: pullFlags, - Action: pullCmd, - ArgsUsage: "", - } -) - -// struct for when a user passes a short or incomplete -// image name -type imagePullStruct struct { - imageName string - tag string - registry string - hasRegistry bool - transport string -} - -func (ips imagePullStruct) returnFQName() string { - return fmt.Sprintf("%s%s/%s:%s", ips.transport, ips.registry, ips.imageName, ips.tag) -} - -func getRegistriesToTry(image string) ([]string, error) { - var registries []string - var imageError = fmt.Sprintf("unable to parse '%s'\n", image) - imgRef, err := reference.Parse(image) - if err != nil { - return nil, errors.Wrapf(err, imageError) - } - tagged, isTagged := imgRef.(reference.NamedTagged) - tag := "latest" - if isTagged { - tag = tagged.Tag() - } - hasDomain := true - registry := reference.Domain(imgRef.(reference.Named)) - if registry == "" { - hasDomain = false - } - imageName := reference.Path(imgRef.(reference.Named)) - pImage := imagePullStruct{ - imageName, - tag, - registry, - hasDomain, - "docker://", - } - if pImage.hasRegistry { - // If input has a registry, we have to assume they included an image - // name but maybe not a tag - pullRef, err := alltransports.ParseImageName(pImage.returnFQName()) - if err != nil { - return nil, errors.Errorf(imageError) - } - registries = append(registries, pullRef.DockerReference().String()) - } else { - // No registry means we check the globals registries configuration file - // and assemble a list of candidate sources to try - registryConfigPath := "" - envOverride := os.Getenv("REGISTRIES_CONFIG_PATH") - if len(envOverride) > 0 { - registryConfigPath = envOverride - } - searchRegistries, err := sysregistries.GetRegistries(&types.SystemContext{SystemRegistriesConfPath: registryConfigPath}) - if err != nil { - fmt.Println(err) - return nil, errors.Errorf("unable to parse the registries.conf file and"+ - " the image name '%s' is incomplete.", imageName) - } - for _, searchRegistry := range searchRegistries { - pImage.registry = searchRegistry - pullRef, err := alltransports.ParseImageName(pImage.returnFQName()) - if err != nil { - return nil, errors.Errorf("unable to parse '%s'", pImage.returnFQName()) - } - registries = append(registries, pullRef.DockerReference().String()) - } - } - return registries, nil -} - -// pullCmd gets the data from the command line and calls pullImage -// to copy an image from a registry to a local machine -func pullCmd(c *cli.Context) error { - var fqRegistries []string - 
- args := c.Args() - if len(args) == 0 { - logrus.Errorf("an image name must be specified") - return nil - } - if len(args) > 1 { - logrus.Errorf("too many arguments. Requires exactly 1") - return nil - } - if err := validateFlags(c, pullFlags); err != nil { - return err - } - image := args[0] - srcRef, err := alltransports.ParseImageName(image) - if err != nil { - fqRegistries, err = getRegistriesToTry(image) - if err != nil { - fmt.Println(err) - } - } else { - fqRegistries = append(fqRegistries, srcRef.DockerReference().String()) - } - runtime, err := getRuntime(c) - if err != nil { - return errors.Wrapf(err, "could not get runtime") - } - defer runtime.Shutdown(false) - - if err != nil { - return errors.Wrapf(err, "could not create runtime") - } - for _, fqname := range fqRegistries { - fmt.Printf("Trying to pull %s...", fqname) - if err := runtime.PullImage(fqname, c.Bool("all-tags"), c.String("signature-policy"), os.Stdout); err != nil { - fmt.Printf(" Failed\n") - } else { - return nil - } - } - return errors.Errorf("error pulling image from %q", image) -} diff --git a/cmd/kpod/push.go b/cmd/kpod/push.go deleted file mode 100644 index a019f54a..00000000 --- a/cmd/kpod/push.go +++ /dev/null @@ -1,126 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" - - "github.com/containers/image/types" - "github.com/containers/storage/pkg/archive" - "github.com/kubernetes-incubator/cri-o/libpod" - "github.com/kubernetes-incubator/cri-o/libpod/common" - "github.com/pkg/errors" - "github.com/urfave/cli" - "golang.org/x/crypto/ssh/terminal" -) - -var ( - pushFlags = []cli.Flag{ - cli.StringFlag{ - Name: "signature-policy", - Usage: "`pathname` of signature policy file (not usually used)", - Hidden: true, - }, - cli.StringFlag{ - Name: "creds", - Usage: "`credentials` (USERNAME:PASSWORD) to use for authenticating to a registry", - }, - cli.StringFlag{ - Name: "cert-dir", - Usage: "`pathname` of a directory containing TLS certificates and keys", - }, - cli.BoolTFlag{ - Name: "tls-verify", - Usage: "require HTTPS and verify certificates when contacting registries (default: true)", - }, - cli.BoolFlag{ - Name: "remove-signatures", - Usage: "discard any pre-existing signatures in the image", - }, - cli.StringFlag{ - Name: "sign-by", - Usage: "add a signature at the destination using the specified key", - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "don't output progress information when pushing images", - }, - } - pushDescription = fmt.Sprintf(` - Pushes an image to a specified location. - The Image "DESTINATION" uses a "transport":"details" format. 
- See kpod-push(1) section "DESTINATION" for the expected format`) - - pushCommand = cli.Command{ - Name: "push", - Usage: "push an image to a specified destination", - Description: pushDescription, - Flags: pushFlags, - Action: pushCmd, - ArgsUsage: "IMAGE DESTINATION", - } -) - -func pushCmd(c *cli.Context) error { - var registryCreds *types.DockerAuthConfig - - args := c.Args() - if len(args) < 2 { - return errors.New("kpod push requires exactly 2 arguments") - } - if err := validateFlags(c, pushFlags); err != nil { - return err - } - srcName := c.Args().Get(0) - destName := c.Args().Get(1) - - registryCredsString := c.String("creds") - certPath := c.String("cert-dir") - skipVerify := !c.BoolT("tls-verify") - removeSignatures := c.Bool("remove-signatures") - signBy := c.String("sign-by") - - if registryCredsString != "" { - creds, err := common.ParseRegistryCreds(registryCredsString) - if err != nil { - if err == common.ErrNoPassword { - fmt.Print("Password: ") - password, err := terminal.ReadPassword(0) - if err != nil { - return errors.Wrapf(err, "could not read password from terminal") - } - creds.Password = string(password) - } else { - return err - } - } - registryCreds = creds - } - - runtime, err := getRuntime(c) - if err != nil { - return errors.Wrapf(err, "could not create runtime") - } - defer runtime.Shutdown(false) - - var writer io.Writer - if !c.Bool("quiet") { - writer = os.Stdout - } - - options := libpod.CopyOptions{ - Compression: archive.Uncompressed, - SignaturePolicyPath: c.String("signature-policy"), - DockerRegistryOptions: common.DockerRegistryOptions{ - DockerRegistryCreds: registryCreds, - DockerCertPath: certPath, - DockerInsecureSkipTLSVerify: skipVerify, - }, - SigningOptions: common.SigningOptions{ - RemoveSignatures: removeSignatures, - SignBy: signBy, - }, - } - - return runtime.PushImage(srcName, destName, options, writer) -} diff --git a/cmd/kpod/rename.go b/cmd/kpod/rename.go deleted file mode 100644 index b638856e..00000000 --- a/cmd/kpod/rename.go +++ /dev/null @@ -1,49 +0,0 @@ -package main - -import ( - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - renameDescription = "Rename a container. 
Container may be created, running, paused, or stopped" - renameFlags = []cli.Flag{} - renameCommand = cli.Command{ - Name: "rename", - Usage: "rename a container", - Description: renameDescription, - Action: renameCmd, - ArgsUsage: "CONTAINER NEW-NAME", - Flags: renameFlags, - } -) - -func renameCmd(c *cli.Context) error { - if len(c.Args()) != 2 { - return errors.Errorf("Rename requires a src container name/ID and a dest container name") - } - if err := validateFlags(c, renameFlags); err != nil { - return err - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not get container server") - } - defer server.Shutdown() - err = server.Update() - if err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - - err = server.ContainerRename(c.Args().Get(0), c.Args().Get(1)) - if err != nil { - return errors.Wrapf(err, "could not rename container") - } - return nil -} diff --git a/cmd/kpod/rm.go b/cmd/kpod/rm.go deleted file mode 100644 index 69f68302..00000000 --- a/cmd/kpod/rm.go +++ /dev/null @@ -1,68 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - rmFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "force, f", - Usage: "Force removal of a running container. The default is false", - }, - } - rmDescription = "Remove one or more containers" - rmCommand = cli.Command{ - Name: "rm", - Usage: fmt.Sprintf(`kpod rm will remove one or more containers from the host. The container name or ID can be used. - This does not remove images. Running containers will not be removed without the -f option.`), - Description: rmDescription, - Flags: rmFlags, - Action: rmCmd, - ArgsUsage: "", - } -) - -// saveCmd saves the image to either docker-archive or oci -func rmCmd(c *cli.Context) error { - args := c.Args() - if len(args) == 0 { - return errors.Errorf("specify one or more containers to remove") - } - if err := validateFlags(c, rmFlags); err != nil { - return err - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not get container server") - } - defer server.Shutdown() - err = server.Update() - if err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - force := c.Bool("force") - - for _, container := range c.Args() { - id, err2 := server.Remove(container, force) - if err2 != nil { - if err == nil { - err = err2 - } else { - err = errors.Wrapf(err, "%v. Stop the container before attempting removal or use -f\n", err2) - } - } else { - fmt.Println(id) - } - } - return err -} diff --git a/cmd/kpod/rmi.go b/cmd/kpod/rmi.go deleted file mode 100644 index 3713db45..00000000 --- a/cmd/kpod/rmi.go +++ /dev/null @@ -1,56 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - rmiDescription = "removes one or more locally stored images." 
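rm, kill, and pause above all share the same loop shape: keep processing the remaining containers after a per-item failure, echo earlier errors as they are superseded, and exit with the last one. A condensed standalone sketch of that pattern; `removeOne` is a hypothetical stand-in for the real server calls:

```go
// Sketch of the continue-on-error loop used by kpod rm/kill/pause.
package main

import (
	"fmt"
	"os"
)

func removeOne(name string) error {
	if name == "missing" {
		return fmt.Errorf("no such container %q", name)
	}
	return nil
}

func removeAll(names []string) error {
	var lastError error
	for _, name := range names {
		if err := removeOne(name); err != nil {
			// print the previous failure before replacing it
			if lastError != nil {
				fmt.Fprintln(os.Stderr, lastError)
			}
			lastError = err
			continue
		}
		fmt.Println(name) // success: echo the ID, as the real commands do
	}
	return lastError
}

func main() {
	if err := removeAll([]string{"web", "missing", "db"}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```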
- rmiFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "force, f", - Usage: "force removal of the image", - }, - } - rmiCommand = cli.Command{ - Name: "rmi", - Usage: "removes one or more images from local storage", - Description: rmiDescription, - Action: rmiCmd, - ArgsUsage: "IMAGE-NAME-OR-ID [...]", - Flags: rmiFlags, - } -) - -func rmiCmd(c *cli.Context) error { - if err := validateFlags(c, rmiFlags); err != nil { - return err - } - - runtime, err := getRuntime(c) - if err != nil { - return errors.Wrapf(err, "could not get runtime") - } - defer runtime.Shutdown(false) - - args := c.Args() - if len(args) == 0 { - return errors.Errorf("image name or ID must be specified") - } - - for _, arg := range args { - image, err := runtime.GetImage(arg) - if err != nil { - return errors.Wrapf(err, "could not get image %q", arg) - } - id, err := runtime.RemoveImage(image, c.Bool("force")) - if err != nil { - return errors.Wrapf(err, "error removing image %q", id) - } - fmt.Printf("%s\n", id) - } - return nil -} diff --git a/cmd/kpod/save.go b/cmd/kpod/save.go deleted file mode 100644 index fac73e65..00000000 --- a/cmd/kpod/save.go +++ /dev/null @@ -1,97 +0,0 @@ -package main - -import ( - "io" - "os" - - "github.com/kubernetes-incubator/cri-o/libpod" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -var ( - saveFlags = []cli.Flag{ - cli.StringFlag{ - Name: "output, o", - Usage: "Write to a file, default is STDOUT", - Value: "/dev/stdout", - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "Suppress the output", - }, - cli.StringFlag{ - Name: "format", - Usage: "Save image to oci-archive", - }, - } - saveDescription = ` - Save an image to docker-archive or oci-archive on the local machine. - Default is docker-archive` - - saveCommand = cli.Command{ - Name: "save", - Usage: "Save image to an archive", - Description: saveDescription, - Flags: saveFlags, - Action: saveCmd, - ArgsUsage: "", - } -) - -// saveCmd saves the image to either docker-archive or oci -func saveCmd(c *cli.Context) error { - args := c.Args() - if len(args) == 0 { - return errors.Errorf("need at least 1 argument") - } - if err := validateFlags(c, saveFlags); err != nil { - return err - } - - runtime, err := getRuntime(c) - if err != nil { - return errors.Wrapf(err, "could not create runtime") - } - defer runtime.Shutdown(false) - - var writer io.Writer - if !c.Bool("quiet") { - writer = os.Stdout - } - - output := c.String("output") - if output == "/dev/stdout" { - fi := os.Stdout - if logrus.IsTerminal(fi) { - return errors.Errorf("refusing to save to terminal. 
Use -o flag or redirect") - } - } - - var dst string - switch c.String("format") { - case libpod.OCIArchive: - dst = libpod.OCIArchive + ":" + output - case libpod.DockerArchive: - fallthrough - case "": - dst = libpod.DockerArchive + ":" + output - default: - return errors.Errorf("unknown format option %q", c.String("format")) - } - - saveOpts := libpod.CopyOptions{ - SignaturePolicyPath: "", - } - - // only one image is supported for now - // future pull requests will fix this - for _, image := range args { - dest := dst + ":" + image - if err := runtime.PushImage(image, dest, saveOpts, writer); err != nil { - return errors.Wrapf(err, "unable to save %q", image) - } - } - return nil -} diff --git a/cmd/kpod/stats.go b/cmd/kpod/stats.go deleted file mode 100644 index ac81212a..00000000 --- a/cmd/kpod/stats.go +++ /dev/null @@ -1,245 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - "strings" - "text/template" - "time" - - "github.com/docker/go-units" - - tm "github.com/buger/goterm" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/oci" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var printf func(format string, a ...interface{}) (n int, err error) -var println func(a ...interface{}) (n int, err error) - -type statsOutputParams struct { - Container string - ID string - CPUPerc string - MemUsage string - MemPerc string - NetIO string - BlockIO string - PIDs uint64 -} - -var ( - statsFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "all, a", - Usage: "show all containers. Only running containers are shown by default. The default is false", - }, - cli.BoolFlag{ - Name: "no-stream", - Usage: "disable streaming stats and only pull the first result, default setting is false", - }, - cli.StringFlag{ - Name: "format", - Usage: "pretty-print container statistics using a Go template", - }, - cli.BoolFlag{ - Name: "json", - Usage: "output container statistics in json format", - }, - } - - statsDescription = "display a live stream of one or more containers' resource usage statistics" - statsCommand = cli.Command{ - Name: "stats", - Usage: "Display percentage of CPU, memory, network I/O, block I/O and PIDs for one or more containers", - Description: statsDescription, - Flags: statsFlags, - Action: statsCmd, - ArgsUsage: "", - } -) - -func statsCmd(c *cli.Context) error { - if err := validateFlags(c, statsFlags); err != nil { - return err - } - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not read config") - } - containerServer, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not create container server") - } - defer containerServer.Shutdown() - err = containerServer.Update() - if err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - times := -1 - if c.Bool("no-stream") { - times = 1 - } - statsChan := make(chan []*libkpod.ContainerStats) - // iterate over the channel until it is closed - go func() { - // print using goterm - printf = tm.Printf - println = tm.Println - for stats := range statsChan { - // Continually refresh statistics - tm.Clear() - tm.MoveCursor(1, 1) - outputStats(stats, c.String("format"), c.Bool("json")) - tm.Flush() - time.Sleep(time.Second) - } - }() - return getStats(containerServer, c.Args(), c.Bool("all"), statsChan, times) -} - -func getStats(server *libkpod.ContainerServer, args []string, all bool, statsChan chan []*libkpod.ContainerStats, times int) error { - ctrs, err := server.ListContainers(isRunning, 
ctrInList(args)) - if err != nil { - return err - } - containerStats := map[string]*libkpod.ContainerStats{} - for _, ctr := range ctrs { - initialStats, err := server.GetContainerStats(ctr, &libkpod.ContainerStats{}) - if err != nil { - return err - } - containerStats[ctr.ID()] = initialStats - } - step := 1 - if times == -1 { - times = 1 - step = 0 - } - for i := 0; i < times; i += step { - reportStats := []*libkpod.ContainerStats{} - for _, ctr := range ctrs { - id := ctr.ID() - if _, ok := containerStats[ctr.ID()]; !ok { - initialStats, err := server.GetContainerStats(ctr, &libkpod.ContainerStats{}) - if err != nil { - return err - } - containerStats[id] = initialStats - } - stats, err := server.GetContainerStats(ctr, containerStats[id]) - if err != nil { - return err - } - // replace the previous measurement with the current one - containerStats[id] = stats - reportStats = append(reportStats, stats) - } - statsChan <- reportStats - - err := server.Update() - if err != nil { - return err - } - ctrs, err = server.ListContainers(isRunning, ctrInList(args)) - if err != nil { - return err - } - } - return nil -} - -func outputStats(stats []*libkpod.ContainerStats, format string, json bool) error { - if format == "" { - outputStatsHeader() - } - if json { - return outputStatsAsJSON(stats) - } - var err error - for _, s := range stats { - if format == "" { - outputStatsUsingFormatString(s) - } else { - params := getStatsOutputParams(s) - err2 := outputStatsUsingTemplate(format, params) - if err2 != nil { - err = errors.Wrapf(err, err2.Error()) - } - } - } - return err -} - -func outputStatsHeader() { - printf("%-64s %-16s %-32s %-16s %-24s %-24s %s\n", "CONTAINER", "CPU %", "MEM USAGE / MEM LIMIT", "MEM %", "NET I/O", "BLOCK I/O", "PIDS") -} - -func outputStatsUsingFormatString(stats *libkpod.ContainerStats) { - printf("%-64s %-16s %-32s %-16s %-24s %-24s %d\n", stats.Container, floatToPercentString(stats.CPU), combineHumanValues(stats.MemUsage, stats.MemLimit), floatToPercentString(stats.MemPerc), combineHumanValues(stats.NetInput, stats.NetOutput), combineHumanValues(stats.BlockInput, stats.BlockOutput), stats.PIDs) -} - -func combineHumanValues(a, b uint64) string { - return fmt.Sprintf("%s / %s", units.HumanSize(float64(a)), units.HumanSize(float64(b))) -} - -func floatToPercentString(f float64) string { - return fmt.Sprintf("%.2f %s", f, "%") -} - -func getStatsOutputParams(stats *libkpod.ContainerStats) statsOutputParams { - return statsOutputParams{ - Container: stats.Container, - ID: stats.Container, - CPUPerc: floatToPercentString(stats.CPU), - MemUsage: combineHumanValues(stats.MemUsage, stats.MemLimit), - MemPerc: floatToPercentString(stats.MemPerc), - NetIO: combineHumanValues(stats.NetInput, stats.NetOutput), - BlockIO: combineHumanValues(stats.BlockInput, stats.BlockOutput), - PIDs: stats.PIDs, - } -} - -func outputStatsUsingTemplate(format string, params statsOutputParams) error { - tmpl, err := template.New("stats").Parse(format) - if err != nil { - return errors.Wrapf(err, "template parsing error") - } - - err = tmpl.Execute(os.Stdout, params) - if err != nil { - return err - } - println() - return nil -} - -func outputStatsAsJSON(stats []*libkpod.ContainerStats) error { - s, err := json.Marshal(stats) - if err != nil { - return err - } - println(s) - return nil -} - -func isRunning(ctr *oci.Container) bool { - return ctr.State().Status == "running" -} - -func ctrInList(idsOrNames []string) func(ctr *oci.Container) bool { - if len(idsOrNames) == 0 { - return 
func(*oci.Container) bool { return true } - } - return func(ctr *oci.Container) bool { - for _, idOrName := range idsOrNames { - if strings.HasPrefix(ctr.ID(), idOrName) || strings.HasSuffix(ctr.Name(), idOrName) { - return true - } - } - return false - } -} diff --git a/cmd/kpod/stop.go b/cmd/kpod/stop.go deleted file mode 100644 index 06b26bb9..00000000 --- a/cmd/kpod/stop.go +++ /dev/null @@ -1,76 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - defaultTimeout int64 = 10 - stopFlags = []cli.Flag{ - cli.Int64Flag{ - Name: "timeout, t", - Usage: "Seconds to wait for stop before killing the container", - Value: defaultTimeout, - }, - } - stopDescription = ` - kpod stop - - Stops one or more running containers. The container name or ID can be used. - A timeout to forcibly stop the container can also be set but defaults to 10 - seconds otherwise. -` - - stopCommand = cli.Command{ - Name: "stop", - Usage: "Stop one or more containers", - Description: stopDescription, - Flags: stopFlags, - Action: stopCmd, - ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]", - } -) - -func stopCmd(c *cli.Context) error { - args := c.Args() - stopTimeout := c.Int64("timeout") - if len(args) < 1 { - return errors.Errorf("you must provide at least one container name or id") - } - if err := validateFlags(c, stopFlags); err != nil { - return err - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not get container server") - } - defer server.Shutdown() - err = server.Update() - if err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - var lastError error - for _, container := range c.Args() { - cid, err := server.ContainerStop(container, stopTimeout) - if err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to stop container %v", container) - } else { - fmt.Println(cid) - } - } - - return lastError -} diff --git a/cmd/kpod/tag.go b/cmd/kpod/tag.go deleted file mode 100644 index b9c38060..00000000 --- a/cmd/kpod/tag.go +++ /dev/null @@ -1,77 +0,0 @@ -package main - -import ( - "github.com/containers/image/docker/reference" - "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/libpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - tagDescription = "Adds one or more additional names to locally-stored image" - tagCommand = cli.Command{ - Name: "tag", - Usage: "Add an additional name to a local image", - Description: tagDescription, - Action: tagCmd, - ArgsUsage: "IMAGE-NAME [IMAGE-NAME ...]", - } -) - -func tagCmd(c *cli.Context) error { - args := c.Args() - if len(args) < 2 { - return errors.Errorf("image name and at least one new name must be specified") - } - runtime, err := getRuntime(c) - if err != nil { - return errors.Wrapf(err, "could not create runtime") - } - defer runtime.Shutdown(false) - - img, err := runtime.GetImage(args[0]) - if err != nil { - return err - } - if img == nil { - return errors.New("null image") - } - err = addImageNames(runtime, img, args[1:]) - if err != nil { - return errors.Wrapf(err, "error adding names %v to image %q", args[1:], args[0]) - } - return nil -} - -func addImageNames(runtime *libpod.Runtime, image *storage.Image, addNames []string) error { - // Add tags to the names if applicable - 
names, err := expandedTags(addNames) - if err != nil { - return err - } - for _, name := range names { - if err := runtime.TagImage(image, name); err != nil { - return errors.Wrapf(err, "error adding name (%v) to image %q", name, image.ID) - } - } - return nil -} - -func expandedTags(tags []string) ([]string, error) { - expandedNames := []string{} - for _, tag := range tags { - var labelName string - name, err := reference.Parse(tag) - if err != nil { - return nil, errors.Wrapf(err, "error parsing tag %q", name) - } - if _, ok := name.(reference.NamedTagged); ok { - labelName = name.String() - } else { - labelName = name.String() + ":latest" - } - expandedNames = append(expandedNames, labelName) - } - return expandedNames, nil -} diff --git a/cmd/kpod/umount.go b/cmd/kpod/umount.go deleted file mode 100644 index bad6752a..00000000 --- a/cmd/kpod/umount.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - umountCommand = cli.Command{ - Name: "umount", - Aliases: []string{"unmount"}, - Usage: "Unmount a working container's root filesystem", - Description: "Unmounts a working container's root filesystem", - Action: umountCmd, - ArgsUsage: "CONTAINER-NAME-OR-ID", - } -) - -func umountCmd(c *cli.Context) error { - args := c.Args() - if len(args) == 0 { - return errors.Errorf("container ID must be specified") - } - if len(args) > 1 { - return errors.Errorf("too many arguments specified") - } - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "Could not get config") - } - store, err := getStore(config) - if err != nil { - return err - } - - err = store.Unmount(args[0]) - if err != nil { - return errors.Wrapf(err, "error unmounting container %q", args[0]) - } - return nil -} diff --git a/cmd/kpod/unpause.go b/cmd/kpod/unpause.go deleted file mode 100644 index a7b7db20..00000000 --- a/cmd/kpod/unpause.go +++ /dev/null @@ -1,58 +0,0 @@ -package main - -import ( - "fmt" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" - "os" -) - -var ( - unpauseDescription = ` - kpod unpause - - Unpauses one or more running containers. The container name or ID can be used. 
-` - unpauseCommand = cli.Command{ - Name: "unpause", - Usage: "Unpause the processes in one or more containers", - Description: unpauseDescription, - Action: unpauseCmd, - ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]", - } -) - -func unpauseCmd(c *cli.Context) error { - args := c.Args() - if len(args) < 1 { - return errors.Errorf("you must provide at least one container name or id") - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not get container server") - } - defer server.Shutdown() - if err := server.Update(); err != nil { - return errors.Wrapf(err, "could not update list of containers") - } - var lastError error - for _, container := range c.Args() { - cid, err := server.ContainerUnpause(container) - if err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to unpause container %v", container) - } else { - fmt.Println(cid) - } - } - - return lastError -} diff --git a/cmd/kpod/version.go b/cmd/kpod/version.go deleted file mode 100644 index 586c41da..00000000 --- a/cmd/kpod/version.go +++ /dev/null @@ -1,48 +0,0 @@ -package main - -import ( - "fmt" - "runtime" - "strconv" - "time" - - "github.com/urfave/cli" -) - -// Overwritten at build time -var ( - // gitCommit is the commit that the binary is being built from. - // It will be populated by the Makefile. - gitCommit string - // buildInfo is the time at which the binary was built - // It will be populated by the Makefile. - buildInfo string -) - -// versionCmd gets and prints version info for version command -func versionCmd(c *cli.Context) error { - fmt.Println("Version: ", c.App.Version) - fmt.Println("Go Version: ", runtime.Version()) - if gitCommit != "" { - fmt.Println("Git Commit: ", gitCommit) - } - if buildInfo != "" { - // Converts unix time from string to int64 - buildTime, err := strconv.ParseInt(buildInfo, 10, 64) - if err != nil { - return err - } - // Prints out the build time in readable format - fmt.Println("Built: ", time.Unix(buildTime, 0).Format(time.ANSIC)) - } - fmt.Println("OS/Arch: ", runtime.GOOS+"/"+runtime.GOARCH) - - return nil -} - -// Cli command to print out the full version of kpod -var versionCommand = cli.Command{ - Name: "version", - Usage: "Display the KPOD Version Information", - Action: versionCmd, -} diff --git a/cmd/kpod/wait.go b/cmd/kpod/wait.go deleted file mode 100644 index b166e330..00000000 --- a/cmd/kpod/wait.go +++ /dev/null @@ -1,62 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var ( - waitDescription = ` - kpod wait - - Block until one or more containers stop and then print their exit codes -` - - waitCommand = cli.Command{ - Name: "wait", - Usage: "Block on one or more containers", - Description: waitDescription, - Action: waitCmd, - ArgsUsage: "CONTAINER-NAME [CONTAINER-NAME ...]", - } -) - -func waitCmd(c *cli.Context) error { - args := c.Args() - if len(args) < 1 { - return errors.Errorf("you must provide at least one container name or id") - } - - config, err := getConfig(c) - if err != nil { - return errors.Wrapf(err, "could not get config") - } - server, err := libkpod.New(config) - if err != nil { - return errors.Wrapf(err, "could not get container server") - } - defer server.Shutdown() - err = server.Update() - if err != nil { - return errors.Wrapf(err, 
"could not update list of containers") - } - - var lastError error - for _, container := range c.Args() { - returnCode, err := server.ContainerWait(container) - if err != nil { - if lastError != nil { - fmt.Fprintln(os.Stderr, lastError) - } - lastError = errors.Wrapf(err, "failed to wait for the container %v", container) - } else { - fmt.Println(returnCode) - } - } - - return lastError -} diff --git a/code-of-conduct.md b/code-of-conduct.md index 215ce7ac..0d15c00c 100644 --- a/code-of-conduct.md +++ b/code-of-conduct.md @@ -1,55 +1,3 @@ -## Kubernetes Community Code of Conduct +# Kubernetes Community Code of Conduct -### Contributor Code of Conduct - -As contributors and maintainers of this project, and in the interest of fostering -an open and welcoming community, we pledge to respect all people who contribute -through reporting issues, posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project a harassment-free experience for -everyone, regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, body size, race, ethnicity, age, -religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery. -* Personal attacks. -* Trolling or insulting/derogatory comments. -* Public or private harassment. -* Publishing other's private information, such as physical or electronic addresses, - without explicit permission. -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are not -aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers -commit themselves to fairly and consistently applying these principles to every aspect -of managing this project. Project maintainers who do not follow or enforce the Code of -Conduct may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Kubernetes maintainer, Sarah Novotny , and/or Dan Kohn . - -This Code of Conduct is adapted from the Contributor Covenant -(http://contributor-covenant.org), version 1.2.0, available at -http://contributor-covenant.org/version/1/2/0/ - -### Kubernetes Events Code of Conduct - -Kubernetes events are working conferences intended for professional networking and collaboration in the -Kubernetes community. Attendees are expected to behave according to professional standards and in accordance -with their employer's policies on appropriate workplace behavior. - -While at Kubernetes events or related social networking opportunities, attendees should not engage in -discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should -be especially aware of these concerns. - -The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes -team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to -be engaging in discriminatory or offensive speech or actions. - -Please bring any concerns to the immediate attention of the Kubernetes event staff. 
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/completions/bash/kpod b/completions/bash/kpod deleted file mode 100644 index 700c0eb8..00000000 --- a/completions/bash/kpod +++ /dev/null @@ -1,554 +0,0 @@ -#! /bin/bash - -: ${PROG:=$(basename ${BASH_SOURCE})} - -__kpod_list_images() { - COMPREPLY=($(compgen -W "$(kpod images -q)" -- $cur)) -} - -__kpod_list_containers() { - COMPREPLY=($(compgen -W "$(kpod ps -aq)" -- $cur)) -} - -_kpod_diff() { - local options_with_args=" - --format - " - local boolean_options=" - " - _complete_ "$options_with_args" "$boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_images - ;; - esac -} - -_kpod_export() { - local options_with_args=" - --output - -o - " - local boolean_options=" - " - _complete_ "$options_with_args" "$boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_images - ;; - esac -} - -_kpod_history() { - local options_with_args=" - --format - " - local boolean_options=" - --human -H - --no-trunc - --quiet -q - " - _complete_ "$options_with_args" "$boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_images - ;; - esac -} - -_kpod_info() { - local boolean_options=" - --help - -h - --debug - " - local options_with_args=" - --format - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_images - ;; - esac -} - -_kpod_images() { - local boolean_options=" - --help - -h - --quiet - -q - --noheading - -n - --no-trunc - --digests - --filter - -f - " - local options_with_args=" - --format - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - esac -} - -_kpod_inspect() { - local boolean_options=" - --help - -h - " - local options_with_args=" - --format - -f - --type - -t - --size - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - esac -} -_kpod_kill() { - local options_with_args=" - --signal -s - " - local boolean_options=" - --help - -h" - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_logs() { - local options_with_args=" - --since - --tail - " - local boolean_options=" - --follow - -f - " - _complete_ "$options_with_args" "$boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_containers - ;; - esac -} - -_kpod_pull() { - local options_with_args=" - " - local boolean_options=" - --all-tags -a - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_unmount() { - _kpod_umount $@ -} - -_kpod_umount() { - local boolean_options=" - --help - -h - " - local options_with_args=" - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - esac -} - -_kpod_mount() { - local boolean_options=" - --help - -h - --notruncate - " - - local options_with_args=" - --label - --format - " - - local all_options="$options_with_args $boolean_options" 
- - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - esac -} - -_kpod_push() { - local boolean_options=" - --disable-compression - -D - --quiet - -q - --signature-policy - --certs - --tls-verify - --remove-signatures - --sign-by - " - - local options_with_args=" - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - esac -} - -_kpod_rename() { - local boolean_options=" - --help - -h - " - local options_with_args=" - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_containers - ;; - esac -} - -_kpod_rm() { - local boolean_options=" - --force - -f - " - - local options_with_args=" - " - - local all_options="$options_with_args $boolean_options" - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_containers - ;; - esac -} - -_kpod_rmi() { - local boolean_options=" - --help - -h - --force - -f - " - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_images - ;; - esac -} - -_kpod_stats() { - local boolean_options=" - --help - --all - -a - --no-stream - --format - " - - case "$cur" in - -*) - COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) - ;; - *) - __kpod_list_containers - ;; - esac -} - -kpod_tag() { - local options_with_args=" - " - local boolean_options=" - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_version() { - local options_with_args=" - " - local boolean_options=" - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_save() { - local options_with_args=" - --output -o - --format - " - local boolean_options=" - --quiet -q - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_export() { - local options_with_args=" - --output -o - " - local boolean_options=" - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_pause() { - local options_with_args=" - --help -h - " - local boolean_options="" - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_ps() { - local options_with_args=" - --filter -f - --format - --last -n - " - local boolean_options=" - --all -a - --latest -l - --no-trunc - --quiet -q - --size -s - --namespace --ns - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_stop() { - local options_with_args=" - --timeout -t - " - local boolean_options="" - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_unpause() { - local options_with_args=" - --help -h - " - local boolean_options="" - _complete_ "$options_with_args" "$boolean_options" - -_kpod_wait() { - local options_with_args="" - local boolean_options="--help -h" - _complete_ "$options_with_args" "$boolean_options" -} - -_complete_() { - local options_with_args=$1 - local boolean_options="$2 -h --help" - - case "$prev" in - $options_with_args) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) - ;; - esac -} - -_kpod_load() { - local options_with_args=" - --input -i - " - local boolean_options=" - --quiet -q - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_login() { - local options_with_args=" - --username - -u - --password - -p - --authfile - " - local 
boolean_options=" - --help - -h - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_logout() { - local options_with_args=" - --authfile - " - local boolean_options=" - --all - -a - --help - -h - " - _complete_ "$options_with_args" "$boolean_options" -} - -_kpod_kpod() { - local options_with_args=" - --config -c - --root - --runroot - --storage-driver - --storage-opt - --log-level - " - local boolean_options=" - --help -h - --version -v - " - commands=" - diff - export - history - images - info - inspect - kill - load - login - logout - logs - mount - pause - ps - pull - push - rename - rm - rmi - save - stats - stop - tag - umount - unmount - unpause - version - wait - " - - case "$prev" in - $main_options_with_args_glob ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) - ;; - esac -} - -_cli_bash_autocomplete() { - local cur opts base - - COMPREPLY=() - cur="${COMP_WORDS[COMP_CWORD]}" - COMPREPLY=() - local cur prev words cword - - _get_comp_words_by_ref -n : cur prev words cword - - local command=${PROG} cpos=0 - local counter=1 - counter=1 - while [ $counter -lt $cword ]; do - case "!${words[$counter]}" in - *) - command=$(echo "${words[$counter]}" | sed 's/-/_/g') - cpos=$counter - (( cpos++ )) - break - ;; - esac - (( counter++ )) - done - - local completions_func=_kpod_${command} - declare -F $completions_func >/dev/null && $completions_func - - eval "$previous_extglob_setting" - return 0 -} - -complete -F _cli_bash_autocomplete $PROG diff --git a/conmon/Makefile b/conmon/Makefile index 460c1faa..b75605d9 100644 --- a/conmon/Makefile +++ b/conmon/Makefile @@ -5,8 +5,8 @@ override LIBS += $(shell pkg-config --libs glib-2.0) override CFLAGS += -std=c99 -Os -Wall -Wextra $(shell pkg-config --cflags glib-2.0) conmon: $(obj) - $(CC) -o $@ $^ $(CFLAGS) $(LIBS) + $(CC) -o ../bin/$@ $^ $(CFLAGS) $(LIBS) .PHONY: clean clean: - rm -f $(obj) conmon + rm -f $(obj) ../bin/conmon diff --git a/conmon/conmon.c b/conmon/conmon.c index 05789882..477b98bf 100644 --- a/conmon/conmon.c +++ b/conmon/conmon.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -96,6 +95,8 @@ static inline void strv_cleanup(char ***strv) #define CMD_SIZE 1024 #define MAX_EVENTS 10 +#define DEFAULT_SOCKET_PATH "/var/lib/crio" + static bool opt_terminal = false; static bool opt_stdin = false; static char *opt_cid = NULL; @@ -111,6 +112,7 @@ static char *opt_log_path = NULL; static char *opt_exit_dir = NULL; static int opt_timeout = 0; static int64_t opt_log_size_max = -1; +static char *opt_socket_path = DEFAULT_SOCKET_PATH; static GOptionEntry opt_entries[] = { { "terminal", 't', 0, G_OPTION_ARG_NONE, &opt_terminal, "Terminal", NULL }, @@ -128,6 +130,7 @@ static GOptionEntry opt_entries[] = { "log-path", 'l', 0, G_OPTION_ARG_STRING, &opt_log_path, "Log file path", NULL }, { "timeout", 'T', 0, G_OPTION_ARG_INT, &opt_timeout, "Timeout in seconds", NULL }, { "log-size-max", 0, 0, G_OPTION_ARG_INT64, &opt_log_size_max, "Maximum size of log file", NULL }, + { "socket-dir-path", 0, 0, G_OPTION_ARG_STRING, &opt_socket_path, "Location of container attach sockets", NULL }, { NULL } }; @@ -292,7 +295,6 @@ const char *stdpipe_name(stdpipe_t pipe) static int write_k8s_log(int fd, stdpipe_t pipe, const char *buf, ssize_t buflen) { char tsbuf[TSBUFLEN]; - static stdpipe_t trailing_line = NO_PIPE; writev_buffer_t bufv = {0}; static int64_t 
bytes_written = 0; int64_t bytes_to_be_written = 0; @@ -309,35 +311,22 @@ static int write_k8s_log(int fd, stdpipe_t pipe, const char *buf, ssize_t buflen while (buflen > 0) { const char *line_end = NULL; ptrdiff_t line_len = 0; - bool insert_newline = FALSE; - bool insert_timestamp = FALSE; + bool partial = FALSE; /* Find the end of the line, or alternatively the end of the buffer. */ line_end = memchr(buf, '\n', buflen); - if (line_end == NULL) + if (line_end == NULL) { line_end = &buf[buflen-1]; + partial = TRUE; + } line_len = line_end - buf + 1; - bytes_to_be_written = line_len; - if (trailing_line != pipe) { - /* - * Write the (timestamp, stream) tuple if there isn't any trailing - * output from the previous line (or if there is trailing output but - * the current buffer being printed is from a different pipe). - */ - insert_timestamp = TRUE; - bytes_to_be_written += (TSBUFLEN - 1); - /* - * If there was a trailing line from a different pipe, prepend a - * newline to split it properly. This technically breaks the flow - * of the previous line (adding a newline in the log where there - * wasn't one output) but without modifying the file in a - * non-append-only way there's not much we can do. - */ - if (trailing_line != NO_PIPE) { - insert_newline = TRUE; - bytes_to_be_written += 1; - } + /* This is line_len bytes + TSBUFLEN - 1 + 2 (- 1 is for ignoring \0). */ + bytes_to_be_written = line_len + TSBUFLEN + 1; + + /* If partial, then we add a \n */ + if (partial) { + bytes_to_be_written += 1; } /* @@ -347,8 +336,6 @@ static int write_k8s_log(int fd, stdpipe_t pipe, const char *buf, ssize_t buflen */ if ((opt_log_size_max > 0) && (bytes_written + bytes_to_be_written) > opt_log_size_max) { ninfo("Creating new log file"); - insert_newline = FALSE; - insert_timestamp = TRUE; bytes_written = 0; /* Close the existing fd */ @@ -362,22 +349,25 @@ static int write_k8s_log(int fd, stdpipe_t pipe, const char *buf, ssize_t buflen /* Open the log path file again */ log_fd = open(opt_log_path, O_WRONLY | O_APPEND | O_CREAT | O_CLOEXEC, 0600); if (log_fd < 0) - pexit("Failed to open log file"); + pexit("Failed to open log file %s: %s", opt_log_path, strerror(errno)); fd = log_fd; } - /* Output a newline */ - if (insert_newline) { - if (writev_buffer_append_segment(fd, &bufv, "\n", -1) < 0) { - nwarn("failed to write newline to log"); - goto next; - } + /* Output the timestamp */ + if (writev_buffer_append_segment(fd, &bufv, tsbuf, -1) < 0) { + nwarn("failed to write (timestamp, stream) to log"); + goto next; } - /* Output a timestamp */ - if (insert_timestamp) { - if (writev_buffer_append_segment(fd, &bufv, tsbuf, -1) < 0) { - nwarn("failed to write (timestamp, stream) to log"); + /* Output log tag for partial or newline */ + if (partial) { + if (writev_buffer_append_segment(fd, &bufv, "P ", -1) < 0) { + nwarn("failed to write partial log tag"); + goto next; + } + } else { + if (writev_buffer_append_segment(fd, &bufv, "F ", -1) < 0) { + nwarn("failed to write end log tag"); goto next; } } @@ -388,11 +378,15 @@ static int write_k8s_log(int fd, stdpipe_t pipe, const char *buf, ssize_t buflen goto next; } + /* Output a newline for partial */ + if (partial) { + if (writev_buffer_append_segment(fd, &bufv, "\n", -1) < 0) { + nwarn("failed to write newline to log"); + goto next; + } + } + bytes_written += bytes_to_be_written; - - /* If we did not output a full line, then we are a trailing_line. */ - trailing_line = (*line_end == '\n') ? 
NO_PIPE : pipe; - next: /* Update the head of the buffer remaining to output. */ buf += line_len; @@ -989,14 +983,14 @@ static char *setup_attach_socket(void) * Create a symlink so we don't exceed unix domain socket * path length limit. */ - attach_symlink_dir_path = g_build_filename("/var/run/crio", opt_cuuid, NULL); + attach_symlink_dir_path = g_build_filename(opt_socket_path, opt_cuuid, NULL); if (unlink(attach_symlink_dir_path) == -1 && errno != ENOENT) pexit("Failed to remove existing symlink for attach socket directory"); if (symlink(opt_bundle_path, attach_symlink_dir_path) == -1) pexit("Failed to create symlink for attach socket"); - attach_sock_path = g_build_filename("/var/run/crio", opt_cuuid, "attach", NULL); + attach_sock_path = g_build_filename(opt_socket_path, opt_cuuid, "attach", NULL); ninfo("attach sock path: %s", attach_sock_path); strncpy(attach_addr.sun_path, attach_sock_path, sizeof(attach_addr.sun_path) - 1); @@ -1126,6 +1120,8 @@ int main(int argc, char *argv[]) if (opt_runtime_path == NULL) nexit("Runtime path not provided. Use --runtime"); + if (access(opt_runtime_path, X_OK) < 0) + pexit("Runtime path %s is not valid: %s", opt_runtime_path, strerror(errno)); if (!opt_exec && opt_exit_dir == NULL) nexit("Container exit directory not provided. Use --exit-dir"); diff --git a/contrib/rpm/Makefile b/contrib/rpm/Makefile deleted file mode 100644 index 24bbca28..00000000 --- a/contrib/rpm/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -.PHONY: dist -dist: crio.spec - spectool -g crio.spec - -.PHONY: rpm -rpm: dist - rpmbuild --define "_sourcedir `pwd`" --define "_specdir `pwd`" \ - --define "_rpmdir `pwd`" --define "_srcrpmdir `pwd`" -ba crio.spec - -all: rpm - -clean: - rm -f *rpm *gz - rm -rf x86_64 diff --git a/contrib/rpm/crio.spec b/contrib/rpm/crio.spec deleted file mode 100644 index 3485fe37..00000000 --- a/contrib/rpm/crio.spec +++ /dev/null @@ -1,76 +0,0 @@ -%define debug_package %{nil} -%global provider github -%global provider_tld com -%global project kubernetes-incubator -%global repo cri-o -%global Name crio -# https://github.com/kubernetes-incubator/cri-o -%global provider_prefix %{provider}.%{provider_tld}/%{project}/%{repo} -%global import_path %{provider_prefix} -%global commit 8ba639952a95f2e24cc98987689138b67545576c -%global shortcommit %(c=%{commit}; echo ${c:0:7}) - -Name: %{Name} -Version: 0.0.1 -Release: 1.git%{shortcommit}%{?dist} -Summary: Kubelet Container Runtime Interface (CRI) for OCI runtimes. -Group: Applications/Text -License: Apache 2.0 -URL: https://%{provider_prefix} -Source0: https://%{provider_prefix}/archive/%{commit}/%{repo}-%{shortcommit}.tar.gz -Provides: %{repo} - -BuildRequires: golang-github-cpuguy83-go-md2man - -%description -The crio package provides an implementation of the -Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes. 
- -crio provides following functionalities: - - Support multiple image formats including the existing Docker image format - Support for multiple means to download images including trust & image verification - Container image management (managing image layers, overlay filesystems, etc) - Container process lifecycle management - Monitoring and logging required to satisfy the CRI - Resource isolation as required by the CRI - -%prep -%setup -q -n %{repo}-%{commit} - -%build -make all - -%install -%make_install -%make_install install.systemd - -#define license tag if not already defined -%{!?_licensedir:%global license %doc} -%files -%{_bindir}/crio -%{_bindir}/crioctl -%{_mandir}/man5/crio.conf.5* -%{_mandir}/man8/crio.8* -%{_sysconfdir}/crio.conf -%{_sysconfdir}/seccomp.json -%dir /%{_libexecdir}/crio -/%{_libexecdir}/crio/conmon -/%{_libexecdir}/crio/pause -%{_unitdir}/crio.service -%doc README.md -%license LICENSE -%dir /usr/share/oci-umount/oci-umount.d -/usr/share/oci-umount/oci-umount.d/cri-umount.conf - - -%preun -%systemd_preun %{Name} - -%postun -%systemd_postun_with_restart %{Name} - -%changelog -* Mon Oct 31 2016 Dan Walsh - 0.0.1 -- Initial RPM release - diff --git a/contrib/system_containers/centos/Dockerfile b/contrib/system_containers/centos/Dockerfile new file mode 100644 index 00000000..0797fb14 --- /dev/null +++ b/contrib/system_containers/centos/Dockerfile @@ -0,0 +1,29 @@ +FROM centos + +ENV VERSION=0 RELEASE=1 ARCH=x86_64 +LABEL com.redhat.component="cri-o" \ + name="$FGC/cri-o" \ + version="$VERSION" \ + release="$RELEASE.$DISTTAG" \ + architecture="$ARCH" \ + usage="atomic install --system --system-package=no crio && systemctl start crio" \ + summary="The cri-o daemon as a system container." \ + maintainer="Yu Qi Zhang " \ + atomic.type="system" + +RUN yum-config-manager --nogpgcheck --add-repo https://cbs.centos.org/repos/virt7-container-common-candidate/x86_64/os/ && \ + yum install --disablerepo=extras --nogpgcheck --setopt=tsflags=nodocs -y iptables cri-o socat iproute runc && \ + rpm -V iptables cri-o iproute runc && \ + yum clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +RUN sed -i '/storage_option =/s/.*/&\n"overlay.override_kernel_check=1",/' /exports/hostfs/etc/crio/crio.conf + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] diff --git a/contrib/system_containers/centos/README.md b/contrib/system_containers/centos/README.md new file mode 100644 index 00000000..428bc6ff --- /dev/null +++ b/contrib/system_containers/centos/README.md @@ -0,0 +1,57 @@ +# cri-o + +This is the cri-o daemon as a system container. + +## Building the image from source: + +``` +# git clone https://github.com/projectatomic/atomic-system-containers +# cd atomic-system-containers/cri-o +# docker build -t crio . +``` + +## Running the system container, with the atomic CLI: + +Pull from registry into ostree: + +``` +# atomic pull --storage ostree $REGISTRY/crio +``` + +Or alternatively, pull from local docker: + +``` +# atomic pull --storage ostree docker:crio:latest +``` + +Install the container: + +Currently we recommend using --system-package=no to avoid having rpmbuild create an rpm file +during installation. 
This flag will tell the atomic CLI to fall back to copying files to the +host instead. + +``` +# atomic install --system --system-package=no --name=crio ($REGISTRY)/crio +``` + +Start as a systemd service: + +``` +# systemctl start crio +``` + +Stopping the service + +``` +# systemctl stop crio +``` + +Removing the container + +``` +# atomic uninstall crio +``` + +## Binary version + +You can find the image automatically built as: registry.centos.org/projectatomic/cri-o:latest diff --git a/contrib/system_containers/centos/cccp.yml b/contrib/system_containers/centos/cccp.yml new file mode 100644 index 00000000..ec4dab74 --- /dev/null +++ b/contrib/system_containers/centos/cccp.yml @@ -0,0 +1,41 @@ +# This is for the purpose of building containers on the CentOS Community Container +# Pipeline. The containers are built, tested and delivered to registry.centos.org and +# lifecycled as well. A corresponding entry must exist in the container index itself, +# located at https://github.com/CentOS/container-index/tree/master/index.d +# You can know more at the following links: +# * https://github.com/CentOS/container-pipeline-service/blob/master/README.md +# * https://github.com/CentOS/container-index/blob/master/README.rst +# * https://wiki.centos.org/ContainerPipeline + +# This will be part of the name of the container. It should match the job-id in index entry +job-id: cri-o + +#the following are optional, can be left blank +#defaults, where applicable are filled in +#nulecule-file : nulecule + +# This flag tells the container pipeline to skip user defined tests on their container +test-skip : True + +# This is path of the script that initiates the user defined tests. It must be able to +# return an exit code. +test-script : null + +# This is the path of custom build script. +build-script : null + +# This is the path of the custom delivery script +delivery-script : null + +# This flag tells the pipeline to deliver this container to docker hub. 
+docker-index : True + +# This flag can be used to enable or disable the custom delivery +custom-delivery : False + +# This flag can be used to enable or disable delivery of container to local registry +local-delivery : True + +Upstreams : + - ref : + url : diff --git a/contrib/system_containers/centos/config.json.template b/contrib/system_containers/centos/config.json.template new file mode 100644 index 00000000..785383d4 --- /dev/null +++ b/contrib/system_containers/centos/config.json.template @@ -0,0 +1,427 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + 
"CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ] + }, + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [ + { + "type": "mount" + } + ], + "resources": { + "devices": [ + { + "access": "rwm", + "allow": true + } + ] + }, + "rootfsPropagation": "private" + }, + "mounts": [ + { + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + "destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": "/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": "${OPT_CNI}", + "type": "bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + 
"type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/centos/manifest.json b/contrib/system_containers/centos/manifest.json new file mode 100644 index 00000000..38f4dc87 --- /dev/null +++ b/contrib/system_containers/centos/manifest.json @@ -0,0 +1,10 @@ +{ + "version": "1.0", + "defaultValues": { + "LOG_LEVEL" : "info", + "OPT_CNI" : "/opt/cni", + "VAR_LIB_CONTAINERS_STORAGE" : "/var/lib/containers/storage", + "VAR_LIB_ORIGIN" : "/var/lib/origin", + "VAR_LIB_KUBE" : "/var/lib/kubelet" + } +} diff --git a/contrib/system_containers/centos/run.sh b/contrib/system_containers/centos/run.sh new file mode 100755 index 00000000..7f34fd42 --- /dev/null +++ b/contrib/system_containers/centos/run.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +# Ensure that new process maintain this SELinux label +PID=$$ +LABEL=`tr -d '\000' < /proc/$PID/attr/current` +printf %s $LABEL > /proc/self/attr/exec + +test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage +test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network + +exec /usr/bin/crio --log-level=$LOG_LEVEL diff --git a/contrib/system_containers/centos/service.template b/contrib/system_containers/centos/service.template new file mode 100644 index 00000000..4c08b39d --- /dev/null +++ b/contrib/system_containers/centos/service.template @@ -0,0 +1,20 @@ +[Unit] +Description=crio daemon +After=network.target + +[Service] +Type=notify +ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh +ExecStart=$EXEC_START +ExecStop=$EXEC_STOP +Restart=on-failure +WorkingDirectory=$DESTDIR +RuntimeDirectory=${NAME} +TasksMax=infinity +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/system_containers/centos/set_mounts.sh b/contrib/system_containers/centos/set_mounts.sh new file mode 100755 index 00000000..c1f0c050 --- /dev/null +++ b/contrib/system_containers/centos/set_mounts.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage +findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin +findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet +mount --make-shared /run +findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd diff --git a/contrib/system_containers/centos/tmpfiles.template b/contrib/system_containers/centos/tmpfiles.template new file mode 100644 index 00000000..94472677 --- /dev/null +++ b/contrib/system_containers/centos/tmpfiles.template @@ -0,0 +1,5 @@ +d ${RUN_DIRECTORY}/crio - - - - - +d /etc/crio - - - - - +Z /etc/crio - - - - - +d ${STATE_DIRECTORY}/origin - - - - - +d ${STATE_DIRECTORY}/kubelet - - - - - diff --git a/contrib/system_containers/fedora/Dockerfile b/contrib/system_containers/fedora/Dockerfile new file mode 100644 index 00000000..da12c6f0 --- /dev/null +++ b/contrib/system_containers/fedora/Dockerfile @@ -0,0 +1,30 @@ +FROM registry.fedoraproject.org/fedora:27 + +ENV VERSION=0 RELEASE=1 ARCH=x86_64 +LABEL com.redhat.component="cri-o" \ + name="$FGC/cri-o" \ + version="$VERSION" \ + release="$RELEASE.$DISTTAG" \ + architecture="$ARCH" \ 
+ usage="atomic install --system --system-package=no crio && systemctl start crio" \ + summary="The cri-o daemon as a system container." \ + maintainer="Yu Qi Zhang " \ + atomic.type="system" + +COPY README.md / + +RUN dnf install --enablerepo=updates-testing --setopt=tsflags=nodocs -y iptables cri-o socat iproute runc && \ + rpm -V iptables cri-o iproute runc && \ + dnf clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +RUN sed -i '/storage_option =/s/.*/&\n"overlay.override_kernel_check=1",/' /exports/hostfs/etc/crio/crio.conf + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] diff --git a/contrib/system_containers/fedora/README.md b/contrib/system_containers/fedora/README.md new file mode 100644 index 00000000..6de39964 --- /dev/null +++ b/contrib/system_containers/fedora/README.md @@ -0,0 +1,53 @@ +# cri-o + +This is the cri-o daemon as a system container. + +## Building the image from source: + +``` +# git clone https://github.com/projectatomic/atomic-system-containers +# cd atomic-system-containers/cri-o +# docker build -t crio . +``` + +## Running the system container, with the atomic CLI: + +Pull from registry into ostree: + +``` +# atomic pull --storage ostree $REGISTRY/crio +``` + +Or alternatively, pull from local docker: + +``` +# atomic pull --storage ostree docker:crio:latest +``` + +Install the container: + +Currently we recommend using --system-package=no to avoid having rpmbuild create an rpm file +during installation. This flag will tell the atomic CLI to fall back to copying files to the +host instead. 
+ +``` +# atomic install --system --system-package=no --name=crio ($REGISTRY)/crio +``` + +Start as a systemd service: + +``` +# systemctl start crio +``` + +Stopping the service + +``` +# systemctl stop crio +``` + +Removing the container + +``` +# atomic uninstall crio +``` diff --git a/contrib/system_containers/fedora/config.json.template b/contrib/system_containers/fedora/config.json.template new file mode 100644 index 00000000..0642fbc1 --- /dev/null +++ b/contrib/system_containers/fedora/config.json.template @@ -0,0 +1,432 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + 
"CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ] + }, + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [ + { + "type": "mount" + } + ], + "resources": { + "devices": [ + { + "access": "rwm", + "allow": true + } + ] + }, + "rootfsPropagation": "private" + }, + "mounts": [ + { + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + "destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": "/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": "${OPT_CNI}", + "type": 
"bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + "type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/fedora/manifest.json b/contrib/system_containers/fedora/manifest.json new file mode 100644 index 00000000..38f4dc87 --- /dev/null +++ b/contrib/system_containers/fedora/manifest.json @@ -0,0 +1,10 @@ +{ + "version": "1.0", + "defaultValues": { + "LOG_LEVEL" : "info", + "OPT_CNI" : "/opt/cni", + "VAR_LIB_CONTAINERS_STORAGE" : "/var/lib/containers/storage", + "VAR_LIB_ORIGIN" : "/var/lib/origin", + "VAR_LIB_KUBE" : "/var/lib/kubelet" + } +} diff --git a/contrib/system_containers/fedora/run.sh b/contrib/system_containers/fedora/run.sh new file mode 100755 index 00000000..7f34fd42 --- /dev/null +++ b/contrib/system_containers/fedora/run.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +# Ensure that new process maintain this SELinux label +PID=$$ +LABEL=`tr -d '\000' < /proc/$PID/attr/current` +printf %s $LABEL > /proc/self/attr/exec + +test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage +test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network + +exec /usr/bin/crio --log-level=$LOG_LEVEL diff --git a/contrib/system_containers/fedora/service.template b/contrib/system_containers/fedora/service.template new file mode 100644 index 00000000..4c08b39d --- /dev/null +++ b/contrib/system_containers/fedora/service.template @@ -0,0 +1,20 @@ +[Unit] +Description=crio daemon +After=network.target + +[Service] +Type=notify +ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh +ExecStart=$EXEC_START +ExecStop=$EXEC_STOP +Restart=on-failure +WorkingDirectory=$DESTDIR +RuntimeDirectory=${NAME} +TasksMax=infinity +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/system_containers/fedora/set_mounts.sh b/contrib/system_containers/fedora/set_mounts.sh new file mode 100755 index 00000000..c1f0c050 --- /dev/null +++ b/contrib/system_containers/fedora/set_mounts.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage +findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin +findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet +mount --make-shared /run +findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd diff --git a/contrib/system_containers/fedora/tmpfiles.template b/contrib/system_containers/fedora/tmpfiles.template new file mode 100644 index 00000000..94472677 --- /dev/null +++ b/contrib/system_containers/fedora/tmpfiles.template @@ -0,0 +1,5 @@ +d ${RUN_DIRECTORY}/crio - - - - - +d /etc/crio - - - - - +Z /etc/crio - - - - - +d ${STATE_DIRECTORY}/origin - - - - - +d ${STATE_DIRECTORY}/kubelet - - - - - diff --git a/contrib/system_containers/rhel/Dockerfile b/contrib/system_containers/rhel/Dockerfile new file mode 100644 index 00000000..3c113fda --- /dev/null +++ b/contrib/system_containers/rhel/Dockerfile @@ -0,0 +1,41 @@ +#oit## This file is managed by the OpenShift Image Tool +#oit## by the OpenShift Continuous 
Delivery team. +#oit## +#oit## Any yum repos listed in this file will effectively be ignored during CD builds. +#oit## Yum repos must be enabled in the oit configuration files. +#oit## Some aspects of this file may be managed programmatically. For example, the image name, labels (version, +#oit## release, and other), and the base FROM. Changes made directly in distgit may be lost during the next +#oit## reconciliation. +#oit## +FROM rhel7:7-released + +RUN \ + yum install --setopt=tsflags=nodocs -y socat iptables cri-o iproute runc skopeo-containers container-selinux && \ + rpm -V socat iptables cri-o iproute runc skopeo-containers container-selinux && \ + yum clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] + +LABEL \ + com.redhat.component="cri-o-docker" \ + io.k8s.description="CRI-O is an implementation of the Kubernetes CRI. It is a lightweight, OCI-compliant runtime that is native to kubernetes. CRI-O supports OCI container images and can pull from any container registry." \ + maintainer="Jhon Honce " \ + name="openshift3/cri-o" \ + License="GPLv2+" \ + io.k8s.display-name="CRI-O" \ + summary="OCI-based implementation of Kubernetes Container Runtime Interface" \ + release="0.13.0.0" \ + version="v3.8.0" \ + architecture="x86_64" \ + usage="atomic install --system --system-package=no crio && systemctl start crio" \ + vendor="Red Hat" \ + io.openshift.tags="cri-o system rhel7" \ + atomic.type="system" diff --git a/contrib/system_containers/rhel/config.json.template b/contrib/system_containers/rhel/config.json.template new file mode 100644 index 00000000..a5eb001e --- /dev/null +++ b/contrib/system_containers/rhel/config.json.template @@ -0,0 +1,422 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + 
"CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ] + }, + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [{ + "type": "mount" + }], + "resources": { + "devices": [{ + "access": "rwm", + "allow": true + }] + }, + "rootfsPropagation": "private" + }, + "mounts": [{ + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + "destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + 
"options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": "/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": "${OPT_CNI}", + "type": "bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + "type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/rhel/help.md b/contrib/system_containers/rhel/help.md new file mode 100644 index 00000000..e46702e7 --- /dev/null +++ b/contrib/system_containers/rhel/help.md @@ -0,0 +1,37 @@ +% CRI-O (1) Container Image Pages +% Jhon Honce +% September 7, 2017 + +# NAME +cri-o - OCI-based implementation of Kubernetes Container Runtime Interface + +# DESCRIPTION +CRI-O is an implementation of the Kubernetes CRI. It is a lightweight, OCI-compliant runtime that is native to kubernetes. CRI-O supports OCI container images and can pull from any container registry. 
+
+You can find more information on the CRI-O project at <https://github.com/kubernetes-incubator/cri-o>.
+
+# USAGE
+Pull from local docker and install system container:
+
+```
+# atomic pull --storage ostree docker:openshift3/cri-o:latest
+# atomic install --system --system-package=no --name cri-o openshift3/cri-o
+```
+
+Start and enable as a systemd service:
+```
+# systemctl enable --now cri-o
+```
+
+Stopping the service:
+```
+# systemctl stop cri-o
+```
+
+Removing the container:
+```
+# atomic uninstall cri-o
+```
+
+# SEE ALSO
+man systemd(1)
diff --git a/contrib/system_containers/rhel/manifest.json b/contrib/system_containers/rhel/manifest.json
new file mode 100644
index 00000000..727abf9e
--- /dev/null
+++ b/contrib/system_containers/rhel/manifest.json
@@ -0,0 +1,10 @@
+{
+ "version": "1.0",
+ "defaultValues": {
+ "LOG_LEVEL": "info",
+ "OPT_CNI": "/opt/cni",
+ "VAR_LIB_CONTAINERS_STORAGE": "/var/lib/containers/storage",
+ "VAR_LIB_ORIGIN": "/var/lib/origin",
+ "VAR_LIB_KUBE": "/var/lib/kubelet"
+ }
+}
diff --git a/contrib/system_containers/rhel/run.sh b/contrib/system_containers/rhel/run.sh
new file mode 100755
index 00000000..7f34fd42
--- /dev/null
+++ b/contrib/system_containers/rhel/run.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+# Ensure that new processes maintain this SELinux label
+PID=$$
+LABEL=`tr -d '\000' < /proc/$PID/attr/current`
+printf %s $LABEL > /proc/self/attr/exec
+
+test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage
+test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network
+
+exec /usr/bin/crio --log-level=$LOG_LEVEL
diff --git a/contrib/system_containers/rhel/service.template b/contrib/system_containers/rhel/service.template
new file mode 100644
index 00000000..4c08b39d
--- /dev/null
+++ b/contrib/system_containers/rhel/service.template
@@ -0,0 +1,20 @@
+[Unit]
+Description=crio daemon
+After=network.target
+
+[Service]
+Type=notify
+ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh
+ExecStart=$EXEC_START
+ExecStop=$EXEC_STOP
+Restart=on-failure
+WorkingDirectory=$DESTDIR
+RuntimeDirectory=${NAME}
+TasksMax=infinity
+LimitNOFILE=1048576
+LimitNPROC=1048576
+LimitCORE=infinity
+TimeoutStartSec=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/contrib/system_containers/rhel/set_mounts.sh b/contrib/system_containers/rhel/set_mounts.sh
new file mode 100755
index 00000000..c1f0c050
--- /dev/null
+++ b/contrib/system_containers/rhel/set_mounts.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage
+findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin
+findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet
+mount --make-shared /run
+findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd
diff --git a/contrib/system_containers/rhel/tmpfiles.template b/contrib/system_containers/rhel/tmpfiles.template
new file mode 100644
index 00000000..94472677
--- /dev/null
+++ b/contrib/system_containers/rhel/tmpfiles.template
@@ -0,0 +1,5 @@
+d ${RUN_DIRECTORY}/crio - - - - -
+d /etc/crio - - - - -
+Z /etc/crio - - - - -
+d ${STATE_DIRECTORY}/origin - - - - -
+d ${STATE_DIRECTORY}/kubelet - - - - -
diff --git a/contrib/systemd/crio.service b/contrib/systemd/crio.service
index 70a3d26b..35d6d427 100644
--- a/contrib/systemd/crio.service
+++ b/contrib/systemd/crio.service
@@ -12,7 +12,7 @@ ExecStart=/usr/local/bin/crio \
 $CRIO_STORAGE_OPTIONS \
$CRIO_NETWORK_OPTIONS ExecReload=/bin/kill -s HUP $MAINPID -TasksMax=8192 +TasksMax=infinity LimitNOFILE=1048576 LimitNPROC=1048576 LimitCORE=infinity diff --git a/contrib/test/integration/build/cri-tools.yml b/contrib/test/integration/build/cri-tools.yml index e314225e..3d30824f 100644 --- a/contrib/test/integration/build/cri-tools.yml +++ b/contrib/test/integration/build/cri-tools.yml @@ -4,13 +4,23 @@ git: repo: "https://github.com/kubernetes-incubator/cri-tools.git" dest: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-tools" - version: "16e6fe4d7199c5689db4630a9330e6a8a12cecd1" + version: "{{ cri_tools_git_version }}" + force: "{{ force_clone | default(False) | bool}}" - name: install crictl command: "/usr/bin/go install github.com/kubernetes-incubator/cri-tools/cmd/crictl" +- name: install critest + command: "/usr/bin/go install github.com/kubernetes-incubator/cri-tools/cmd/critest" + - name: link crictl file: src: "{{ ansible_env.GOPATH }}/bin/crictl" dest: /usr/bin/crictl state: link + +- name: link critest + file: + src: "{{ ansible_env.GOPATH }}/bin/critest" + dest: /usr/bin/critest + state: link diff --git a/contrib/test/integration/build/kubernetes.yml b/contrib/test/integration/build/kubernetes.yml index 206cba44..63d907f1 100644 --- a/contrib/test/integration/build/kubernetes.yml +++ b/contrib/test/integration/build/kubernetes.yml @@ -2,9 +2,11 @@ - name: clone kubernetes source repo git: - repo: "https://github.com/runcom/kubernetes.git" + repo: "https://github.com/{{ k8s_github_fork }}/kubernetes.git" dest: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes" - version: "cri-o-node-e2e-patched" + # based on kube v1.9.0-alpha.2, update as needed + version: "{{ k8s_git_version }}" + force: "{{ force_clone | default(False) | bool}}" - name: install etcd command: "hack/install-etcd.sh" @@ -38,13 +40,15 @@ export PATH=/usr/local/go/bin:/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/root/bin:{{ ansible_env.GOPATH }}/bin:{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/third_party/etcd:{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/_output/local/bin/linux/amd64/ export CONTAINER_RUNTIME=remote export CGROUP_DRIVER=systemd - export CONTAINER_RUNTIME_ENDPOINT='/var/run/crio.sock --runtime-request-timeout=5m' + export CONTAINER_RUNTIME_ENDPOINT='{{ crio_socket }} --runtime-request-timeout=5m' export ALLOW_SECURITY_CONTEXT="," export ALLOW_PRIVILEGED=1 - export DNS_SERVER_IP={{ ansible_eth0.ipv4.address }} - export API_HOST={{ ansible_eth0.ipv4.address }} - export API_HOST_IP={{ ansible_eth0.ipv4.address }} + export DNS_SERVER_IP={{ ansible_default_ipv4.address }} + export API_HOST={{ ansible_default_ipv4.address }} + export API_HOST_IP={{ ansible_default_ipv4.address }} export KUBE_ENABLE_CLUSTER_DNS=true + export ENABLE_HOSTPATH_PROVISIONER=true + export KUBE_ENABLE_CLUSTER_DASHBOARD=true ./hack/local-up-cluster.sh mode: "u=rwx,g=rwx,o=x" diff --git a/contrib/test/integration/build/runc.yml b/contrib/test/integration/build/runc.yml index 8ec09c4c..f3221f4a 100644 --- a/contrib/test/integration/build/runc.yml +++ b/contrib/test/integration/build/runc.yml @@ -4,6 +4,7 @@ git: repo: "https://github.com/opencontainers/runc.git" dest: "{{ ansible_env.GOPATH }}/src/github.com/opencontainers/runc" + version: "c6e4a1ebeb1a72b529c6f1b6ee2b1ae5b868b14f" - name: build runc make: diff --git a/contrib/test/integration/critest.yml b/contrib/test/integration/critest.yml new file mode 100644 index 00000000..377ab59d --- /dev/null +++ b/contrib/test/integration/critest.yml @@ -0,0 
+1,45 @@ +--- + +- name: enable and start CRI-O + systemd: + name: crio + state: started + enabled: yes + daemon_reload: yes + +- name: Flush the iptables + command: iptables -F + +- name: Enable localnet routing + command: sysctl -w net.ipv4.conf.all.route_localnet=1 + +- name: Add masquerade for localhost + command: iptables -t nat -I POSTROUTING -s 127.0.0.1 ! -d 127.0.0.1 -j MASQUERADE + +- name: run critest validation + shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock v" + args: + chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" + async: 5400 + poll: 30 + when: ansible_distribution not in ['RedHat', 'CentOS'] + + # XXX: RHEL has an additional test which fails because of selinux but disabling + # it doesn't solve the issue. + # TODO(runcom): enable skipped tests once we fix them (selinux) + # https://bugzilla.redhat.com/show_bug.cgi?id=1414236 + # https://access.redhat.com/solutions/2897781 +- name: run critest validation + shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock -s 'should not allow privilege escalation when true' v" + args: + chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" + async: 5400 + poll: 30 + when: ansible_distribution in ['RedHat', 'CentOS'] + +- name: run critest benchmarks + shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock b" + args: + chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" + async: 5400 + poll: 30 diff --git a/contrib/test/integration/e2e.yml b/contrib/test/integration/e2e.yml index a95b2231..17982d1d 100644 --- a/contrib/test/integration/e2e.yml +++ b/contrib/test/integration/e2e.yml @@ -10,7 +10,7 @@ - name: update the server address for the custom cluster lineinfile: dest: /usr/local/bin/createcluster.sh - line: "export {{ item }}={{ ansible_eth0.ipv4.address }}" + line: "export {{ item }}={{ ansible_default_ipv4.address }}" regexp: "^export {{ item }}=" state: present with_items: @@ -37,13 +37,14 @@ path: "{{ artifacts }}" state: directory +# TODO remove the last test skipped once https://github.com/kubernetes-incubator/cri-o/pull/1217 is merged - name: Buffer the e2e testing command to workaround Ansible YAML folding "feature" set_fact: e2e_shell_cmd: > /usr/bin/go run hack/e2e.go --test - -test_args="-host=https://{{ ansible_default_ipv4.address }}:6443 - --ginkgo.focus=\[Conformance\] + --test_args="-host=https://{{ ansible_default_ipv4.address }}:6443 + --ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|PersistentVolumes|\[HPA\]|should.support.building.a.client.with.a.CSR|should.support.inline.execution.and.attach --report-dir={{ artifacts }}" &> {{ artifacts }}/e2e.log # Fix vim syntax hilighting: " diff --git a/contrib/test/integration/golang.yml b/contrib/test/integration/golang.yml index 63e55697..38f3d78e 100644 --- a/contrib/test/integration/golang.yml +++ b/contrib/test/integration/golang.yml @@ -1,9 +1,14 @@ --- +- name: ensure Golang dir is empty first + file: + path: /usr/local/go + state: absent + - name: fetch Golang unarchive: remote_src: yes - src: https://storage.googleapis.com/golang/go1.8.4.linux-amd64.tar.gz + src: "https://storage.googleapis.com/golang/go{{ version }}.linux-amd64.tar.gz" dest: /usr/local - name: link go toolchain @@ -47,5 +52,4 @@ - onsi/gomega - cloudflare/cfssl/cmd/... 
- jteeuwen/go-bindata/go-bindata - - vbatts/git-validation - cpuguy83/go-md2man diff --git a/contrib/test/integration/main.yml b/contrib/test/integration/main.yml index ce4a206f..1f6448f5 100644 --- a/contrib/test/integration/main.yml +++ b/contrib/test/integration/main.yml @@ -10,15 +10,23 @@ - name: install Golang tools include: golang.yml + vars: + version: "1.8.5" - name: clone build and install bats include: "build/bats.yml" - name: clone build and install cri-tools include: "build/cri-tools.yml" + vars: + cri_tools_git_version: "b42fc3f364dd48f649d55926c34492beeb9b2e99" - name: clone build and install kubernetes include: "build/kubernetes.yml" + vars: + k8s_git_version: "cri-o-node-e2e-patched-logs" + k8s_github_fork: "runcom" + crio_socket: "/var/run/crio.sock" - name: clone build and install runc include: "build/runc.yml" @@ -33,6 +41,8 @@ tags: - integration - e2e + - node-e2e + - critest tasks: - name: clone build and install cri-o include: "build/cri-o.yml" @@ -44,9 +54,54 @@ tags: - integration tasks: + - name: clone build and install cri-tools + include: "build/cri-tools.yml" + vars: + force_clone: True + cri_tools_git_version: "a9e38a4a000bc1a4052fb33de1c967b8cfe9ad40" - name: run cri-o integration tests include: test.yml +- hosts: all + remote_user: root + vars_files: + - "{{ playbook_dir }}/vars.yml" + tags: + - critest + tasks: + - name: install Golang tools + include: golang.yml + vars: + version: "1.9.2" + - name: setup critest + include: "build/cri-tools.yml" + vars: + force_clone: True + cri_tools_git_version: "a9e38a4a000bc1a4052fb33de1c967b8cfe9ad40" + - name: run critest validation and benchmarks + include: critest.yml + +- hosts: all + remote_user: root + vars_files: + - "{{ playbook_dir }}/vars.yml" + tags: + - node-e2e + tasks: + - name: install Golang tools + include: golang.yml + vars: + version: "1.9.2" + - name: clone build and install kubernetes + include: "build/kubernetes.yml" + vars: + force_clone: True + k8s_git_version: "master" + k8s_github_fork: "kubernetes" + crio_socket: "/var/run/crio/crio.sock" + - name: run k8s node-e2e tests + include: node-e2e.yml + - hosts: all remote_user: root vars_files: @@ -54,5 +109,17 @@ tags: - e2e tasks: + - name: install Golang tools + include: golang.yml + vars: + version: "1.9.2" + - name: clone build and install kubernetes + include: "build/kubernetes.yml" + vars: + force_clone: True + # master as of 12/11/2017 + k8s_git_version: "master-nfs-fix" + k8s_github_fork: "runcom" + crio_socket: "/var/run/crio/crio.sock" - name: run k8s e2e tests include: e2e.yml diff --git a/contrib/test/integration/node-e2e.yml b/contrib/test/integration/node-e2e.yml new file mode 100644 index 00000000..6ea8ac2c --- /dev/null +++ b/contrib/test/integration/node-e2e.yml @@ -0,0 +1,26 @@ +--- + +- name: enable and start CRI-O + systemd: + name: crio + state: started + enabled: yes + daemon_reload: yes + +- name: disable SELinux + command: setenforce 0 + +- name: Flush the iptables + command: iptables -F + +- name: run node-e2e tests + shell: | + # parametrize crio socket + # cgroup-driver??? 
+ # TODO(runcom): remove conformance focus, we want everything for testgrid + make test-e2e-node PARALLELISM=1 RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT=/var/run/crio.sock IMAGE_SERVICE_ENDPOINT=/var/run/crio/crio.sock TEST_ARGS='--prepull-images=true --kubelet-flags="--cgroup-driver=systemd"' FOCUS="\[Conformance\]" &> {{ artifacts }}/node-e2e.log + args: + chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes" + async: 7200 + poll: 10 + ignore_errors: true diff --git a/contrib/test/integration/system.yml b/contrib/test/integration/system.yml index 6adc4f2a..453551fa 100644 --- a/contrib/test/integration/system.yml +++ b/contrib/test/integration/system.yml @@ -5,6 +5,7 @@ name: "{{ item }}" state: present with_items: + - atomic-registries - container-selinux - curl - device-mapper-devel @@ -41,9 +42,9 @@ - ostree-devel - pkgconfig - python - - python2-boto - python2-crypto - python-devel + - python-rhsm-certificates - python-virtualenv - PyYAML - redhat-rpm-config @@ -57,6 +58,22 @@ async: 600 poll: 10 +- name: Add python2-boto for Fedora + package: + name: "{{ item }}" + state: present + with_items: + - python2-boto + when: ansible_distribution in ['Fedora'] + +- name: Add python-boto for RHEL and CentOS + package: + name: "{{ item }}" + state: present + with_items: + - python-boto + when: ansible_distribution in ['RedHat', 'CentOS'] + - name: Add Btrfs for Fedora package: name: "{{ item }}" @@ -106,6 +123,12 @@ - name: Flush the iptables command: iptables -F +- name: Enable localnet routing + command: sysctl -w net.ipv4.conf.all.route_localnet=1 + +- name: Add masquerade for localhost + command: iptables -t nat -I POSTROUTING -s 127.0.0.1 ! -d 127.0.0.1 -j MASQUERADE + - name: Update the kernel cmdline to include quota support command: grubby --update-kernel=ALL --args="rootflags=pquota" - when: ansible_distribution in ['RedHat', 'CentOS'] \ No newline at end of file + when: ansible_distribution in ['RedHat', 'CentOS'] diff --git a/contrib/test/integration/test.yml b/contrib/test/integration/test.yml index 45c394ed..418ceff7 100644 --- a/contrib/test/integration/test.yml +++ b/contrib/test/integration/test.yml @@ -18,7 +18,7 @@ state: directory - name: run integration tests - shell: "CGROUP_MANAGER=cgroupfs STORAGE_OPTS='--storage-driver=overlay{{ extra_storage_opts | default('') }}' make localintegration >& {{ artifacts }}/testout.txt" + shell: "CGROUP_MANAGER=cgroupfs STORAGE_OPTIONS='--storage-driver=overlay{{ extra_storage_opts | default('') }}' make localintegration >& {{ artifacts }}/testout.txt" args: chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" async: 5400 diff --git a/crictl.yaml b/crictl.yaml new file mode 100644 index 00000000..b6142efd --- /dev/null +++ b/crictl.yaml @@ -0,0 +1 @@ +runtime-endpoint: /var/run/crio/crio.sock diff --git a/docs/crio.8.md b/docs/crio.8.md index 082f6721..8408978b 100644 --- a/docs/crio.8.md +++ b/docs/crio.8.md @@ -5,166 +5,131 @@ crio - OCI Kubernetes Container Runtime daemon # SYNOPSIS -**crio** -[**--apparmor-profile**=[*value*]] -[**--cgroup-manager**=[*value*]] -[**--cni-config-dir**=[*value*]] -[**--cni-plugin-dir**=[*value*]] -[**--config**=[*value*]] -[**--conmon**=[*value*]] -[**--cpu-profile**=[*value*]] -[**--default-transport**=[*value*]] -[**--help**|**-h**] -[**--insecure-registry**=[*value*]] -[**--listen**=[*value*]] -[**--log**=[*value*]] -[**--log-format value**] -[**--log-level value**] -[**--pause-command**=[*value*]] -[**--pause-image**=[*value*]] -[**--registry**=[*value*]] 
-[**--root**=[*value*]] -[**--runroot**=[*value*]] -[**--runtime**=[*value*]] -[**--seccomp-profile**=[*value*]] -[**--selinux**] -[**--signature-policy**=[*value*]] -[**--storage-driver**=[*value*]] -[**--storage-opt**=[*value*]] -[**--version**|**-v**] - +crio +``` +[--apparmor-profile=[value]] +[--cgroup-manager=[value]] +[--cni-config-dir=[value]] +[--cni-plugin-dir=[value]] +[--config=[value]] +[--conmon=[value]] +[--cpu-profile=[value]] +[--default-transport=[value]] +[--help|-h] +[--insecure-registry=[value]] +[--listen=[value]] +[--log=[value]] +[--log-format value] +[--log-level value] +[--pause-command=[value]] +[--pause-image=[value]] +[--registry=[value]] +[--root=[value]] +[--runroot=[value]] +[--runtime=[value]] +[--seccomp-profile=[value]] +[--selinux] +[--signature-policy=[value]] +[--storage-driver=[value]] +[--storage-opt=[value]] +[--version|-v] +``` # DESCRIPTION OCI-based implementation of Kubernetes Container Runtime Interface Daemon crio is meant to provide an integration path between OCI conformant runtimes and the kubelet. Specifically, it implements the Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes. The scope of crio is tied to the scope of the CRI. - * Support multiple image formats including the existing Docker image format - * Support for multiple means to download images including trust & image verification - * Container image management (managing image layers, overlay filesystems, etc) - * Container process lifecycle management - * Monitoring and logging required to satisfy the CRI - * Resource isolation as required by the CRI - -**crio [GLOBAL OPTIONS]** - -**crio [GLOBAL OPTIONS] config [OPTIONS]** +1. Support multiple image formats including the existing Docker image format. +2. Support for multiple means to download images including trust & image verification. +3. Container image management (managing image layers, overlay filesystems, etc). +4. Container process lifecycle management. +5. Monitoring and logging required to satisfy the CRI. +6. Resource isolation as required by the CRI. +**Usage**: +``` +crio [GLOBAL OPTIONS] +crio [GLOBAL OPTIONS] config [OPTIONS] +``` # GLOBAL OPTIONS +**--apparmor_profile**="": Name of the apparmor profile to be used as the runtime's default (default: "crio-default") -**--apparmor_profile**="" - Name of the apparmor profile to be used as the runtime's default (default: "crio-default") +**--cgroup-manager**="": cgroup manager (cgroupfs or systemd) -**--cgroup-manager**="" - cgroup manager (cgroupfs or systemd) +**--config**="": path to configuration file -**--config**="" - path to configuration file +**--conmon**="": path to the conmon executable (default: "/usr/local/libexec/crio/conmon") -**--conmon**="" - path to the conmon executable (default: "/usr/local/libexec/crio/conmon") +**--cpu-profile**="": set the CPU profile file path -**--cpu-profile**="" -set the CPU profile file path +**--default-transport**: A prefix to prepend to image names that can't be pulled as-is. -**--default-transport** - A prefix to prepend to image names that can't be pulled as-is. +**--help, -h**: Print usage statement -**--help, -h** - Print usage statement +**--insecure-registry=**: Enable insecure registry communication, i.e., enable un-encrypted and/or untrusted communication. -**--insecure-registry=** - Enable insecure registry communication, i.e., enable un-encrypted - and/or untrusted communication. +1. List of insecure registries can contain an element with CIDR notation to specify a whole subnet. +2. 
Insecure registries accept HTTP or accept HTTPS with certificates from unknown CAs. +3. Enabling `--insecure-registry` is useful when running a local registry. However, because its use creates security vulnerabilities, **it should ONLY be enabled for testing purposes**. For increased security, users should add their CA to their system's list of trusted CAs instead of using `--insecure-registry`. - List of insecure registries can contain an element with CIDR notation - to specify a whole subnet. Insecure registries accept HTTP and/or - accept HTTPS with certificates from unknown CAs. +**--image-volumes**="": Image volume handling ('mkdir', 'bind' or 'ignore') (default: "mkdir") - Enabling --insecure-registry is useful when running a local registry. - However, because its use creates security vulnerabilities it should - ONLY be enabled for testing purposes. For increased security, users - should add their CA to their system's list of trusted CAs instead of - using --insecure-registry. +1. mkdir: A directory is created inside the container root filesystem for the volumes. +2. bind: A directory is created inside container state directory and bind mounted into the container for the volumes. +3. ignore: All volumes are just ignored and no action is taken. -**--image-volumes**="" - Image volume handling ('mkdir', 'bind' or 'ignore') (default: "mkdir") - mkdir: A directory is created inside the container root filesystem for the volumes. - bind: A directory is created inside container state directory and bind mounted into - the container for the volumes. - ignore: All volumes are just ignored and no action is taken. +**--listen**="": Path to CRI-O socket (default: "/var/run/crio/crio.sock") -**--listen**="" - Path to crio socket (default: "/var/run/crio.sock") +**--log**="": Set the log file path where internal debug information is written -**--log**="" - Set the log file path where internal debug information is written +**--log-format**="": Set the format used by logs ('text' (default), or 'json') (default: "text") -**--log-format**="" - Set the format used by logs ('text' (default), or 'json') (default: "text") +**--log-level**="": log crio messages above specified level: debug, info (default), warn, error, fatal or panic -**--log-level**="" - log CRI-O messages above specified level: debug, info (default), warn, error, fatal or panic +**--log-size-max**="": Maximum log size in bytes for a container (default: -1 (no limit)). If it is positive, it must be >= 8192 (to match/exceed conmon read buffer). -**--log-size-max**="" - Maximum log size in bytes for a container (default: -1 (no limit)). - If it is positive, it must be >= 8192 (to match/exceed conmon read buffer). 
+**--pause-command**="": Path to the pause executable in the pause image (default: "/pause")

-**--pause-command**=""
- Path to the pause executable in the pause image (default: "/pause")
+**--pause-image**="": Image which contains the pause executable (default: "kubernetes/pause")

-**--pause-image**=""
- Image which contains the pause executable (default: "kubernetes/pause")
+**--pids-limit**="": Maximum number of processes allowed in a container (default: 1024)

-**--pids-limit**=""
- Maximum number of processes allowed in a container (default: 1024)
+**--enable-shared-pid-namespace**="": Enable using a shared PID namespace for containers in a pod (default: false)

-**--root**=""
- CRIO root dir (default: "/var/lib/containers/storage")
+**--root**="": The crio root dir (default: "/var/lib/containers/storage")

-**--registry**=""
- Registry host which will be prepended to unqualified images, can be specified multiple times
+**--registry**="": Registry host which will be prepended to unqualified images, can be specified multiple times

-**--runroot**=""
- CRIO state dir (default: "/var/run/containers/storage")
+**--runroot**="": The crio state dir (default: "/var/run/containers/storage")

-**--runtime**=""
- OCI runtime path (default: "/usr/bin/runc")
+**--runtime**="": OCI runtime path (default: "/usr/bin/runc")

-**--selinux**=*true*|*false*
- Enable selinux support (default: false)
+**--selinux**=**true**|**false**: Enable selinux support (default: false)

-**--seccomp-profile**=""
- Path to the seccomp json profile to be used as the runtime's default (default: "/etc/crio/seccomp.json")
+**--seccomp-profile**="": Path to the seccomp json profile to be used as the runtime's default (default: "/etc/crio/seccomp.json")

-**--signature-policy**=""
- Path to the signature policy json file (default: "", to use the system-wide default)
+**--signature-policy**="": Path to the signature policy json file (default: "", to use the system-wide default)

-**--storage-driver**
- OCI storage driver (default: "devicemapper")
+**--storage-driver**: OCI storage driver (default: "devicemapper")

-**--storage-opt**
- OCI storage driver option (no default)
+**--storage-opt**: OCI storage driver option (no default)

-**--cni-config-dir**=""
- CNI configuration files directory (default: "/etc/cni/net.d/")
+**--cni-config-dir**="": CNI configuration files directory (default: "/etc/cni/net.d/")

-**--cni-plugin-dir**=""
- CNI plugin binaries directory (default: "/opt/cni/bin/")
+**--cni-plugin-dir**="": CNI plugin binaries directory (default: "/opt/cni/bin/")

-**--cpu-profile**
- Set the CPU profile file path
+**--cpu-profile**: Set the CPU profile file path

-**--version, -v**
- Print the version
+**--version, -v**: Print the version

# COMMANDS
-CRIO's default command is to start the daemon. However, it currently offers a
+CRI-O's default command is to start the daemon. However, it currently offers a
single additional subcommand.

## config

Outputs a commented version of the configuration file that would've been used
-by CRIO. This allows you to save you current configuration setup and then load
+by CRI-O. This allows you to save your current configuration setup and then load
it later with **--config**. Global options will modify the output.
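+
+For example, to capture the configuration the daemon would use and load it again
+later (a sketch; the path shown is the default documented in **crio.conf(5)**):
+
+```
+# crio config > /etc/crio/crio.conf
+# crio --config /etc/crio/crio.conf
+```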
**--default** diff --git a/docs/crio.conf.5.md b/docs/crio.conf.5.md index ced28c37..708f26e7 100644 --- a/docs/crio.conf.5.md +++ b/docs/crio.conf.5.md @@ -68,7 +68,7 @@ Example: ## CRIO.API TABLE **listen**="" - Path to crio socket (default: "/var/run/crio.sock") + Path to crio socket (default: "/var/run/crio/crio.sock") ## CRIO.RUNTIME TABLE @@ -87,6 +87,9 @@ Example: **pids_limit**="" Maximum number of processes allowed in a container (default: 1024) +**enable_shared_pid_namespace**="" + Enable using a shared PID namespace for containers in a pod (default: false) + **runtime**="" OCI runtime path (default: "/usr/bin/runc") @@ -105,6 +108,9 @@ Example: **no_pivot**=*true*|*false* Instructs the runtime to not use pivot_root, but instead use MS_MOVE +**default_mounts**=[] + List of mount points, in the form host:container, to be mounted in every container + ## CRIO.IMAGE TABLE **default_transport** diff --git a/docs/kpod-attach.1.md b/docs/kpod-attach.1.md deleted file mode 100644 index 4c2a0218..00000000 --- a/docs/kpod-attach.1.md +++ /dev/null @@ -1,31 +0,0 @@ -% kpod(1) kpod-attach - See the output of pid 1 of a container or enter the container -% Dan Walsh -# kpod-attach "1" "September 2017" "kpod" - -## NAME -kpod-attach - Attach to a running container - -## Description - -We chose not to implement the `attach` feature in `kpod` even though the upstream Docker -project has it. The upstream project has had lots of issues with attaching to running -processes that we did not want to replicate. The `kpod exec` and `kpod log` commands -offer you the same functionality far more dependably. - -**Reasons to attach to the primary PID of a container:** - - -1) Executing commands inside of the container - - We recommend that you use `kpod exec` to execute a command within a container - - `kpod exec CONTAINERID /bin/sh` - -2) Viewing the output stream of the primary process in the container - - We recommend that you use `kpod logs` to see the output from the container - - `kpod logs CONTAINERID` - -## SEE ALSO -kpod(1), kpod-exec(1), kpod-logs(1) diff --git a/docs/kpod-cp.1.md b/docs/kpod-cp.1.md deleted file mode 100644 index eea48e6c..00000000 --- a/docs/kpod-cp.1.md +++ /dev/null @@ -1,46 +0,0 @@ -% kpod(1) kpod-cp - Copy content between container's file system and the host -% Dan Walsh -# kpod-cp "1" "August 2017" "kpod" - -## NAME -kpod-cp - Copy files/folders between a container and the local filesystem. - -## Description -We chose not to implement the `cp` feature in `kpod` even though the upstream Docker -project has it. We have a much stronger capability. Using standard kpod-mount -and kpod-umount, we can take advantage of the entire linux tool chain, rather -then just cp. - -If a user wants to copy contents out of a container or into a container, they -can execute a few simple commands. - -You can copy from the container's file system to the local machine or the -reverse, from the local filesystem to the container. 
- -If you want to copy the /etc/foobar directory out of a container and onto /tmp -on the host, you could execute the following commands: - - mnt=$(kpod mount CONTAINERID) - cp -R ${mnt}/etc/foobar /tmp - kpod umount CONTAINERID - -If you want to untar a tar ball into a container, you can execute these commands: - - mnt=$(kpod mount CONTAINERID) - tar xf content.tgz -C ${mnt} - kpod umount CONTAINERID - -One last example, if you want to install a package into a container that -does not have dnf installed, you could execute something like: - - mnt=$(kpod mount CONTAINERID) - dnf install --installroot=${mnt} httpd - chroot ${mnt} rm -rf /var/log/dnf /var/cache/dnf - kpod umount CONTAINERID - -This shows that using `kpod mount` and `kpod umount` you can use all of the -standard linux tools for moving files into and out of containers, not just -the cp command. - -## SEE ALSO -kpod(1), kpod-mount(1), kpod-umount(1) diff --git a/docs/kpod-diff.1.md b/docs/kpod-diff.1.md deleted file mode 100644 index 1916780c..00000000 --- a/docs/kpod-diff.1.md +++ /dev/null @@ -1,45 +0,0 @@ -% kpod(1) kpod-diff - Inspect changes on a container or image's filesystem -% Dan Walsh -# kpod-diff "1" "August 2017" "kpod" - -## NAME -kpod diff - Inspect changes on a container or image's filesystem - -## SYNOPSIS -**kpod** **diff** [*options* [...]] NAME - -## DESCRIPTION -Displays changes on a container or image's filesystem. The container or image will be compared to its parent layer - -## OPTIONS - -**--format** - -Alter the output into a different format. The only valid format for diff is `json`. - - -## EXAMPLE - -kpod diff redis:alpine -C /usr -C /usr/local -C /usr/local/bin -A /usr/local/bin/docker-entrypoint.sh - -kpod diff --format json redis:alpine -{ - "changed": [ - "/usr", - "/usr/local", - "/usr/local/bin" - ], - "added": [ - "/usr/local/bin/docker-entrypoint.sh" - ] -} - -## SEE ALSO -kpod(1) - -## HISTORY -August 2017, Originally compiled by Ryan Cole diff --git a/docs/kpod-export.1.md b/docs/kpod-export.1.md deleted file mode 100644 index c344f136..00000000 --- a/docs/kpod-export.1.md +++ /dev/null @@ -1,44 +0,0 @@ -% kpod(1) kpod-export - Simple tool to export a container's filesystem as a tarball -% Urvashi Mohnani -# kpod-export "1" "July 2017" "kpod" - -## NAME -kpod-export - Export container's filesystem contents as a tar archive - -## SYNOPSIS -**kpod export** -**CONTAINER** -[**--output**|**-o**] -[**--help**|**-h**] - -## DESCRIPTION -**kpod export** exports the filesystem of a container and saves it as a tarball -on the local machine. **kpod export** writes to STDOUT by default and can be -redirected to a file using the **output flag**. 
- -**kpod [GLOBAL OPTIONS]** - -**kpod export [GLOBAL OPTIONS]** - -**kpod export [OPTIONS] CONTAINER** - -## OPTIONS - -**--output, -o** -Write to a file, default is STDOUT - -## EXAMPLES - -``` -# kpod export -o redis-container.tar 883504668ec465463bc0fe7e63d53154ac3b696ea8d7b233748918664ea90e57 -``` - -``` -# kpod export > redis-container.tar 883504668ec465463bc0fe7e63d53154ac3b696ea8d7b233748918664ea90e57 -``` - -## SEE ALSO -kpod(1), kpod-import(1), crio(8), crio.conf(5) - -## HISTORY -August 2017, Originally compiled by Urvashi Mohnani diff --git a/docs/kpod-history.1.md b/docs/kpod-history.1.md deleted file mode 100644 index ae75e376..00000000 --- a/docs/kpod-history.1.md +++ /dev/null @@ -1,106 +0,0 @@ -% kpod(1) kpod-history - Simple tool to view the history of an image -% Urvashi Mohnani -% kpod-history "1" "JULY 2017" "kpod" - -## NAME -kpod-history - Shows the history of an image - -## SYNOPSIS -**kpod history** -**IMAGE[:TAG|DIGEST]** -[**--human**|**-H**] -[**--no-trunc**] -[**--quiet**|**-q**] -[**--format**] -[**--help**|**-h**] - -## DESCRIPTION -**kpod history** displays the history of an image by printing out information -about each layer used in the image. The information printed out for each layer -include Created (time and date), Created By, Size, and Comment. The output can -be truncated or not using the **--no-trunc** flag. If the **--human** flag is -set, the time of creation and size are printed out in a human readable format. -The **--quiet** flag displays the ID of the image only when set and the **--format** -flag is used to print the information using the Go template provided by the user. - -Valid placeholders for the Go template are listed below: - -| **Placeholder** | **Description** | -| --------------- | ----------------------------------------------------------------------------- | -| .ID | Image ID | -| .Created | if **--human**, time elapsed since creation, otherwise time stamp of creation | -| .CreatedBy | Command used to create the layer | -| .Size | Size of layer on disk | -| .Comment | Comment for the layer | - -**kpod [GLOBAL OPTIONS]** - -**kpod history [GLOBAL OPTIONS]** - -**kpod history [OPTIONS] IMAGE[:TAG|DIGEST]** - -## OPTIONS - -**--human, -H** - Display sizes and dates in human readable format - -**--no-trunc** - Do not truncate the output - -**--quiet, -q** - Print the numeric IDs only - -**--format** - Alter the output for a format like 'json' or a Go template. - - -## EXAMPLES - -``` -# kpod history debian -ID CREATED CREATED BY SIZE COMMENT -b676ca55e4f2c 9 weeks ago /bin/sh -c #(nop) CMD ["bash"] 0 B - 9 weeks ago /bin/sh -c #(nop) ADD file:ebba725fb97cea4... 45.14 MB -``` - -``` -# kpod history --no-trunc=true --human=false debian -ID CREATED CREATED BY SIZE COMMENT -b676ca55e4f2c 2017-07-24T16:52:55Z /bin/sh -c #(nop) CMD ["bash"] 0 - 2017-07-24T16:52:54Z /bin/sh -c #(nop) ADD file:ebba725fb97cea4... 
45142935 -``` - -``` -# kpod history --format "{{.ID}} {{.Created}}" debian -b676ca55e4f2c 9 weeks ago - 9 weeks ago -``` - -``` -# kpod history --format json debian -[ - { - "id": "b676ca55e4f2c0ce53d0636438c5372d3efeb5ae99b676fa5a5d1581bad46060", - "created": "2017-07-24T16:52:55.195062314Z", - "createdBy": "/bin/sh -c #(nop) CMD [\"bash\"]", - "size": 0, - "comment": "" - }, - { - "id": "b676ca55e4f2c0ce53d0636438c5372d3efeb5ae99b676fa5a5d1581bad46060", - "created": "2017-07-24T16:52:54.898893387Z", - "createdBy": "/bin/sh -c #(nop) ADD file:ebba725fb97cea45d0b1b35ccc8144e766fcfc9a78530465c23b0c4674b14042 in / ", - "size": 45142935, - "comment": "" - } -] -``` - -## history -Show the history of an image - -## SEE ALSO -kpod(1), crio(8), crio.conf(5) - -## HISTORY -July 2017, Originally compiled by Urvashi Mohnani diff --git a/docs/kpod-images.1.md b/docs/kpod-images.1.md deleted file mode 100644 index 96408090..00000000 --- a/docs/kpod-images.1.md +++ /dev/null @@ -1,60 +0,0 @@ -% kpod(1) kpod-images - List images in local storage -% Dan Walsh -# kpod-images "1" "March 2017" "kpod" - -## NAME -kpod images - List images in local storage - -## SYNOPSIS -**kpod** **images** [*options* [...]] - -## DESCRIPTION -Displays locally stored images, their names, and their IDs. - -## OPTIONS - -**--digests** - -Show image digests - -**--filter, -f=[]** - -Filter output based on conditions provided (default []) - -**--format** - -Change the default output format. This can be of a supported type like 'json' -or a Go template. - -**--noheading, -n** - -Omit the table headings from the listing of images. - -**--no-trunc, --notruncate** - -Do not truncate output. - -**--quiet, -q** - -Lists only the image IDs. - - -## EXAMPLE - -kpod images - -kpod images --quiet - -kpod images -q --noheading --notruncate - -kpod images --format json - -kpod images --format "{{.ID}}" - -kpod images --filter dangling=true - -## SEE ALSO -kpod(1) - -## HISTORY -March 2017, Originally compiled by Dan Walsh diff --git a/docs/kpod-info.1.md b/docs/kpod-info.1.md deleted file mode 100644 index 99deae9b..00000000 --- a/docs/kpod-info.1.md +++ /dev/null @@ -1,36 +0,0 @@ -% kpod(1) kpod-version - Simple tool to view version information -% Vincent Batts -% kpod-version "1" "JULY 2017" "kpod" - -## NAME -kpod-info - Display system information - - -## SYNOPSIS -**kpod** **info** [*options* [...]] - - -## DESCRIPTION - -Information display here pertain to the host, current storage stats, and build of kpod. Useful for the user and when reporting issues. - - -## OPTIONS - -**--debug, -D** - -Show additional information - -**--format** - -Change output format to "json" or a Go template. - - -## EXAMPLE - -`kpod info` - -`kpod info --debug --format json| jq .host.kernel` - -## SEE ALSO -crio(8), crio.conf(5) diff --git a/docs/kpod-inspect.1.md b/docs/kpod-inspect.1.md deleted file mode 100644 index 633f1fa4..00000000 --- a/docs/kpod-inspect.1.md +++ /dev/null @@ -1,171 +0,0 @@ -% kpod(1) kpod-inspect - Display a container or image's configuration -% Dan Walsh -# kpod-inspect "1" "July 2017" "kpod" - -## NAME -kpod inspect - Display a container or image's configuration - -## SYNOPSIS -**kpod** **inspect** [*options* [...]] name - -## DESCRIPTION -This displays the low-level information on containers and images identified by name or ID. By default, this will render all results in a JSON array. If the container and image have the same name, this will return container JSON for unspecified type. 
If a format is specified, the given template will be executed for each result.
-
-## OPTIONS
-
-**--type, -t="TYPE"**
-
-Return data on items of the specified type. Type can be 'container', 'image' or 'all' (default: all)
-
-**--format, -f="FORMAT"**
-
-Format the output using the given Go template
-
-**--size**
-
-Display the total file size if the type is a container
-
-
-## EXAMPLE
-
-kpod inspect redis:alpine
-
-{
-    "ArgsEscaped": true,
-    "AttachStderr": false,
-    "AttachStdin": false,
-    "AttachStdout": false,
-    "Cmd": [
-        "/bin/sh",
-        "-c",
-        "#(nop) ",
-        "CMD [\"redis-server\"]"
-    ],
-    "Domainname": "",
-    "Entrypoint": [
-        "entrypoint.sh"
-    ],
-    "Env": [
-        "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-        "REDIS_VERSION=3.2.9",
-        "REDIS_DOWNLOAD_URL=http://download.redis.io/releases/redis-3.2.9.tar.gz",
-        "REDIS_DOWNLOAD_SHA=6eaacfa983b287e440d0839ead20c2231749d5d6b78bbe0e0ffa3a890c59ff26"
-    ],
-    "ExposedPorts": {
-        "6379/tcp": {}
-    },
-    "Hostname": "e1ede117fb1e",
-    "Image": "sha256:75e877aa15b534396de82d385386cc4dda7819d5cbb018b9f97b77aeb8f4b55a",
-    "Labels": {},
-    "OnBuild": [],
-    "OpenStdin": false,
-    "StdinOnce": false,
-    "Tty": false,
-    "User": "",
-    "Volumes": {
-        "/data": {}
-    },
-    "WorkingDir": "/data"
-}
-{
-    "ID": "b3f2436bdb978c1d33b1387afb5d7ba7e3243ed2ce908db431ac0069da86cb45",
-    "Names": [
-        "docker.io/library/redis:alpine"
-    ],
-    "Digests": [
-        "sha256:88286f41530e93dffd4b964e1db22ce4939fffa4a4c665dab8591fbab03d4926",
-        "sha256:07b1ac6c7a5068201d8b63a09bb15358ec1616b813ef3942eb8cc12ae191227f",
-        "sha256:91e2e140ea27b3e89f359cd9fab4ec45647dda2a8e5fb0c78633217d9dca87b5",
-        "sha256:08957ceaa2b3be874cde8d7fa15c274300f47185acd62bca812a2ffb6228482d",
-        "sha256:acd3d12a6a79f772961a771f678c1a39e1f370e7baeb9e606ad8f1b92572f4ab",
-        "sha256:4ad88df090801e8faa8cf0be1f403b77613d13e11dad73f561461d482f79256c",
-        "sha256:159ac12c79e1a8d85dfe61afff8c64b96881719139730012a9697f432d6b739a"
-    ],
-    "Parent": "",
-    "Comment": "",
-    "Created": "2017-06-28T22:14:36.35280993Z",
-    "Container": "ba8d6c6b0d7fdd201fce404236136b44f3bfdda883466531a3d1a1f87906770b",
-    "ContainerConfig": {
-        "Hostname": "e1ede117fb1e",
-        "Domainname": "",
-        "User": "",
-        "AttachStdin": false,
-        "AttachStdout": false,
-        "AttachStderr": false,
-        "Tty": false,
-        "OpenStdin": false,
-        "StdinOnce": false,
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "REDIS_VERSION=3.2.9",
-            "REDIS_DOWNLOAD_URL=http://download.redis.io/releases/redis-3.2.9.tar.gz",
-            "REDIS_DOWNLOAD_SHA=6eaacfa983b287e440d0839ead20c2231749d5d6b78bbe0e0ffa3a890c59ff26"
-        ],
-        "Cmd": [
-            "/bin/sh",
-            "-c",
-            "#(nop) ",
-            "CMD [\"redis-server\"]"
-        ],
-        "ArgsEscaped": true,
-        "Image": "sha256:75e877aa15b534396de82d385386cc4dda7819d5cbb018b9f97b77aeb8f4b55a",
-        "Volumes": {
-            "/data": {}
-        },
-        "WorkingDir": "/data",
-        "Entrypoint": [
-            "entrypoint.sh"
-        ],
-        "Labels": {},
-        "OnBuild": []
-    },
-    "Author": "",
-    "Config": {
-        "ExposedPorts": {
-            "6379/tcp": {}
-        },
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "REDIS_VERSION=3.2.9",
-            "REDIS_DOWNLOAD_URL=http://download.redis.io/releases/redis-3.2.9.tar.gz",
-            "REDIS_DOWNLOAD_SHA=6eaacfa983b287e440d0839ead20c2231749d5d6b78bbe0e0ffa3a890c59ff26"
-        ],
-        "Entrypoint": [
-            "entrypoint.sh"
-        ],
-        "Cmd": [
-            "redis-server"
-        ],
-        "Volumes": {
-            "/data": {}
-        },
-        "WorkingDir": "/data"
-    },
-    "Architecture": "amd64",
-    "OS": "linux",
-    "Size": 3965955,
-    "VirtualSize": 19808086,
-    "GraphDriver": {
-        "Name": "overlay",
-        "Data": {
-            "MergedDir": "/var/lib/containers/storage/overlay/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/merged",
-            "UpperDir": "/var/lib/containers/storage/overlay/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/diff",
-            "WorkDir": "/var/lib/containers/storage/overlay/2059d805c90e034cb773d9722232ef018a72143dd31113b470fb876baeccd700/work"
-        }
-    },
-    "RootFS": {
-        "type": "layers",
-        "diff_ids": [
-            "sha256:5bef08742407efd622d243692b79ba0055383bbce12900324f75e56f589aedb0",
-            "sha256:c92a8fc997217611d0bfc9ff14d7ec00350ca564aef0ecbf726624561d7872d7",
-            "sha256:d4c406dea37a107b0cccb845611266a146725598be3e82ba31c55c08d1583b5a",
-            "sha256:8b4fa064e2b6c03a6c37089b0203f167375a8b49259c0ad7cb47c8c1e58b3fa0",
-            "sha256:c393e3d0b00ddf6b4166f1e2ad68245e08e9e3be0a0567a36d0a43854f03bfd6",
-            "sha256:38047b4117cb8bb3bba82991daf9a4e14ba01f9f66c1434d4895a7e96f67d8ba"
-        ]
-    }
-}
-
-
-## SEE ALSO
-kpod(1)
diff --git a/docs/kpod-kill.1.md b/docs/kpod-kill.1.md
deleted file mode 100644
index 91247d28..00000000
--- a/docs/kpod-kill.1.md
+++ /dev/null
@@ -1,33 +0,0 @@
-% kpod(1) kpod-kill - Kill one or more containers with a signal
-% Brent Baude
-# kpod-kill "1" "September 2017" "kpod"
-
-## NAME
-kpod kill - Kills one or more containers with a signal
-
-## SYNOPSIS
-**kpod kill [OPTIONS] CONTAINER [...]**
-
-## DESCRIPTION
-The main process inside each container specified will be sent SIGKILL, or any signal specified with the --signal option.
-
-## OPTIONS
-
-**--signal, -s**
-
-Signal to send to the container. For more information on Linux signals, refer to *man signal(7)*.
-
-
-## EXAMPLE
-
-kpod kill mywebserver
-
-kpod kill 860a4b23
-
-kpod kill --signal TERM 860a4b23
-
-## SEE ALSO
-kpod(1), kpod-stop(1)
-
-## HISTORY
-September 2017, Originally compiled by Brent Baude
diff --git a/docs/kpod-load.1.md b/docs/kpod-load.1.md
deleted file mode 100644
index 34711c9f..00000000
--- a/docs/kpod-load.1.md
+++ /dev/null
@@ -1,66 +0,0 @@
-% kpod(1) kpod-load - Simple tool to load an image from an archive to containers-storage
-% Urvashi Mohnani
-# kpod-load "1" "July 2017" "kpod"
-
-## NAME
-kpod-load - Load an image from a docker archive
-
-## SYNOPSIS
-**kpod load**
-**NAME[:TAG|@DIGEST]**
-[**--input**|**-i**]
-[**--quiet**|**-q**]
-[**--help**|**-h**]
-
-## DESCRIPTION
-**kpod load** copies an image from either a **docker-archive** or an **oci-archive** stored
-on the local machine. **kpod load** reads from stdin by default, or from a file if the **input** flag is set.
-The **quiet** flag suppresses the output when set.
-
-**kpod [GLOBAL OPTIONS]**
-
-**kpod load [GLOBAL OPTIONS]**
-
-**kpod load [OPTIONS] NAME[:TAG|@DIGEST]**
-
-## OPTIONS
-
-**--input, -i**
-Read from an archive file; the default is STDIN
-
-**--quiet, -q**
-Suppress the output
-
-## EXAMPLES
-
-```
-# kpod load --quiet -i fedora.tar
-```
-
-```
-# kpod load < fedora.tar
-Getting image source signatures
-Copying blob sha256:5bef08742407efd622d243692b79ba0055383bbce12900324f75e56f589aedb0
- 0 B / 4.03 MB [---------------------------------------------------------------]
-Copying config sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d624560
- 0 B / 1.48 KB [---------------------------------------------------------------]
-Writing manifest to image destination
-Storing signatures
-```
-
-```
-# cat fedora.tar | kpod load
-Getting image source signatures
-Copying blob sha256:5bef08742407efd622d243692b79ba0055383bbce12900324f75e56f589aedb0
- 0 B / 4.03 MB [---------------------------------------------------------------]
-Copying config sha256:7328f6f8b41890597575cbaadc884e7386ae0acc53b747401ebce5cf0d624560
- 0 B / 1.48 KB [---------------------------------------------------------------]
-Writing manifest to image destination
-Storing signatures
-```
-
-## SEE ALSO
-kpod(1), kpod-save(1), crio(8), crio.conf(5)
-
-## HISTORY
-July 2017, Originally compiled by Urvashi Mohnani
diff --git a/docs/kpod-login.1.md b/docs/kpod-login.1.md
deleted file mode 100644
index 05b3097c..00000000
--- a/docs/kpod-login.1.md
+++ /dev/null
@@ -1,65 +0,0 @@
-% kpod(1) kpod-login - Simple tool to login to a registry server
-% Urvashi Mohnani
-# kpod-login "1" "August 2017" "kpod"
-
-## NAME
-kpod-login - Login to a container registry
-
-## SYNOPSIS
-**kpod login**
-[**--help**|**-h**]
-[**--authfile**]
-[**--user**|**-u**]
-[**--password**|**-p**]
-**REGISTRY**
-
-## DESCRIPTION
-**kpod login** logs into a specified registry server with the correct username
-and password. **kpod login** reads in the username and password from STDIN.
-The username and password can also be set using the **username** and **password** flags.
-The path of the authentication file can be specified by the user by setting the **authfile**
-flag. The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.
-
-**kpod [GLOBAL OPTIONS]**
-
-**kpod login [GLOBAL OPTIONS]**
-
-**kpod login [OPTIONS] REGISTRY [GLOBAL OPTIONS]**
-
-## OPTIONS
-
-**--password, -p**
-Password for registry
-
-**--username, -u**
-Username for registry
-
-**--authfile**
-Path of the authentication file. Default is **${XDG\_RUNTIME\_DIR}/containers/auth.json**
-
-## EXAMPLES
-
-```
-# kpod login docker.io
-Username: umohnani
-Password:
-Login Succeeded!
-```
-
-```
-# kpod login -u testuser -p testpassword localhost:5000
-Login Succeeded!
-```
-
-```
-# kpod login --authfile authdir/myauths.json docker.io
-Username: umohnani
-Password:
-Login Succeeded!
-```
-
-## SEE ALSO
-kpod(1), kpod-logout(1), crio(8), crio.conf(5)
-
-## HISTORY
-August 2017, Originally compiled by Urvashi Mohnani
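The cached credentials land in the auth file mentioned above. A hedged sketch of what that file typically looks like, following the containers/image auth.json convention; the registry entry and the base64 value (testuser:testpassword) are illustrative, and jq is assumed to be installed:

```bash
# Inspect the credentials cache written by kpod login.
jq . "${XDG_RUNTIME_DIR}/containers/auth.json"
# Typical shape (values illustrative):
# {
#   "auths": {
#     "docker.io": {
#       "auth": "dGVzdHVzZXI6dGVzdHBhc3N3b3Jk"
#     }
#   }
# }
```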
diff --git a/docs/kpod-logout.1.md b/docs/kpod-logout.1.md
deleted file mode 100644
index 5f119a18..00000000
--- a/docs/kpod-logout.1.md
+++ /dev/null
@@ -1,56 +0,0 @@
-% kpod(1) kpod-logout - Simple tool to logout of a registry server
-% Urvashi Mohnani
-# kpod-logout "1" "August 2017" "kpod"
-
-## NAME
-kpod-logout - Logout of a container registry
-
-## SYNOPSIS
-**kpod logout**
-[**--help**|**-h**]
-[**--authfile**]
-[**--all**|**-a**]
-**REGISTRY**
-
-## DESCRIPTION
-**kpod logout** logs out of a specified registry server by deleting the cached credentials
-stored in the **auth.json** file. The path of the authentication file can be overridden by the user by setting the **authfile** flag.
-The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.
-All the cached credentials can be removed by setting the **all** flag.
-
-**kpod [GLOBAL OPTIONS]**
-
-**kpod logout [GLOBAL OPTIONS]**
-
-**kpod logout [OPTIONS] REGISTRY [GLOBAL OPTIONS]**
-
-## OPTIONS
-
-**--authfile**
-Path of the authentication file. Default is **${XDG\_RUNTIME\_DIR}/containers/auth.json**
-
-**--all, -a**
-Remove the cached credentials for all registries in the auth file
-
-## EXAMPLES
-
-```
-# kpod logout docker.io
-Remove login credentials for https://registry-1.docker.io/v2/
-```
-
-```
-# kpod logout --authfile authdir/myauths.json docker.io
-Remove login credentials for https://registry-1.docker.io/v2/
-```
-
-```
-# kpod logout --all
-Remove login credentials for all registries
-```
-
-## SEE ALSO
-kpod(1), kpod-login(1), crio(8), crio.conf(5)
-
-## HISTORY
-August 2017, Originally compiled by Urvashi Mohnani
diff --git a/docs/kpod-logs.1.md b/docs/kpod-logs.1.md
deleted file mode 100644
index 25d108ed..00000000
--- a/docs/kpod-logs.1.md
+++ /dev/null
@@ -1,61 +0,0 @@
-% kpod(1) kpod-logs - Fetch the logs of a container
-% Ryan Cole
-# kpod-logs "1" "March 2017" "kpod"
-
-## NAME
-kpod logs - Fetch the logs of a container
-
-## SYNOPSIS
-**kpod** **logs** [*options* [...]] container
-
-## DESCRIPTION
-The kpod logs command batch-retrieves whatever logs are present for a container at the time of execution. This does not guarantee execution order when combined with kpod run (i.e. your run may not have generated any logs at the time you execute kpod logs).
-
-## OPTIONS
-
-**--follow, -f**
-
-Follow log output. Default is false
-
-**--since=TIMESTAMP**
-
-Show logs since TIMESTAMP
-
-**--tail=LINES**
-
-Output the specified number of LINES at the end of the logs. LINES must be a positive integer. Defaults to 0, which prints all lines
-
-## EXAMPLE
-
-kpod logs b3f2436bdb978c1d33b1387afb5d7ba7e3243ed2ce908db431ac0069da86cb45
-
-2017/08/07 10:16:21 Seeked /var/log/crio/pods/eb296bd56fab164d4d3cc46e5776b54414af3bf543d138746b25832c816b933b/c49f49788da14f776b7aa93fb97a2a71f9912f4e5a3e30397fca7dfe0ee0367b.log - &{Offset:0 Whence:0}
-1:C 07 Aug 14:10:09.055 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
-1:C 07 Aug 14:10:09.055 # Redis version=4.0.1, bits=64, commit=00000000, modified=0, pid=1, just started
-1:C 07 Aug 14:10:09.055 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
-1:M 07 Aug 14:10:09.055 # You requested maxclients of 10000 requiring at least 10032 max file descriptors.
-1:M 07 Aug 14:10:09.055 # Server can't set maximum open files to 10032 because of OS error: Operation not permitted. -1:M 07 Aug 14:10:09.055 # Current maximum open files is 4096. maxclients has been reduced to 4064 to compensate for low ulimit. If you need higher maxclients increase 'ulimit -n'. -1:M 07 Aug 14:10:09.056 * Running mode=standalone, port=6379. -1:M 07 Aug 14:10:09.056 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. -1:M 07 Aug 14:10:09.056 # Server initialized - - -kpod logs --tail 2 b3f2436bdb97 - -1:M 07 Aug 14:10:09.056 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. -1:M 07 Aug 14:10:09.056 # Server initialized - -kpod logs 224c375f27cd --since 2017-08-07T10:10:09.055837383-04:00 myserver - -1:M 07 Aug 14:10:09.055 # Server can't set maximum open files to 10032 because of OS error: Operation not permitted. -1:M 07 Aug 14:10:09.055 # Current maximum open files is 4096. maxclients has been reduced to 4064 to compensate for low ulimit. If you need higher maxclients increase 'ulimit -n'. -1:M 07 Aug 14:10:09.056 * Running mode=standalone, port=6379. -1:M 07 Aug 14:10:09.056 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. -1:M 07 Aug 14:10:09.056 # Server initialized - -## SEE ALSO -kpod(1) - -## HISTORY -August 2017, Originally compiled by Ryan Cole diff --git a/docs/kpod-mount.1.md b/docs/kpod-mount.1.md deleted file mode 100644 index 25ccd937..00000000 --- a/docs/kpod-mount.1.md +++ /dev/null @@ -1,50 +0,0 @@ -% kpod(1) kpod-mount - Mount a working container's root filesystem. -% Dan Walsh -# kpod-mount "1" "July 2017" "kpod" - -## NAME -kpod mount - Mount a working container's root filesystem - -## SYNOPSIS -**kpod** **mount** - -**kpod** **mount** **containerID** - -## DESCRIPTION -Mounts the specified container's root file system in a location which can be -accessed from the host, and returns its location. - -If you execute the command without any arguments, the tool will list all of the -currently mounted containers. - -## RETURN VALUE -The location of the mounted file system. On error an empty string and errno is -returned. - -## OPTIONS - -**--format** - Print the mounted containers in specified format (json) - -**--notruncate** - -Do not truncate IDs in output. - -**--label** - -SELinux label for the mount point - -## EXAMPLE - -kpod mount c831414b10a3 - -/var/lib/containers/storage/overlay/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged - -kpod mount - -c831414b10a3 /var/lib/containers/storage/overlay/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged - -a7060253093b /var/lib/containers/storage/overlay/0ff7d7ca68bed1ace424f9df154d2dd7b5a125c19d887f17653cbcd5b6e30ba1/merged - -## SEE ALSO -kpod(1), kpod-umount(1), mount(8) diff --git a/docs/kpod-pause.1.md b/docs/kpod-pause.1.md deleted file mode 100644 index 4a1eee92..00000000 --- a/docs/kpod-pause.1.md +++ /dev/null @@ -1,24 +0,0 @@ -% kpod(1) kpod-pause - Pause one or more containers -% Dan Walsh -# kpod-pause "1" "September 2017" "kpod" - -## NAME -kpod pause - Pause one or more containers - -## SYNOPSIS -**kpod pause [OPTIONS] CONTAINER [...]** - -## DESCRIPTION -Pauses all the processes in one or more containers. You may use container IDs or names as input. 
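Pausing pairs naturally with the other commands documented here, for example freezing a container while archiving its root filesystem via **kpod mount**. A sketch under that assumption, with illustrative names and paths:

```bash
# Freeze the container so its filesystem stops changing, archive the
# rootfs, then resume. "mywebserver" and the tar path are examples.
kpod pause mywebserver
mnt=$(kpod mount mywebserver)          # see kpod-mount(1)
tar -C "$mnt" -cf /tmp/mywebserver-rootfs.tar .
kpod umount mywebserver                # see kpod-umount(1)
kpod unpause mywebserver
```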
-
-## EXAMPLE
-
-kpod pause mywebserver
-
-kpod pause 860a4b23
-
-## SEE ALSO
-kpod(1), kpod-unpause(1)
-
-## HISTORY
-September 2017, Originally compiled by Dan Walsh
diff --git a/docs/kpod-ps.1.md b/docs/kpod-ps.1.md
deleted file mode 100644
index e4285531..00000000
--- a/docs/kpod-ps.1.md
+++ /dev/null
@@ -1,131 +0,0 @@
-% kpod(1) kpod-ps - Simple tool to list containers
-% Urvashi Mohnani
-% kpod-ps "1" "AUGUST 2017" "kpod"
-
-## NAME
-kpod-ps - Prints out information about containers
-
-## SYNOPSIS
-**kpod ps**
-[**--all**|**-a**]
-[**--no-trunc**]
-[**--quiet**|**-q**]
-[**--format**]
-[**--help**|**-h**]
-
-## DESCRIPTION
-**kpod ps** lists the running containers on the system. Use the **--all** flag to view
-information for all containers. By default it lists:
-
- * container id
- * the name of the image the container is using
- * the COMMAND the container is executing
- * the time the container was created
- * the status of the container
- * port mappings the container is using
- * alternative names for the container
-
-**kpod [GLOBAL OPTIONS]**
-
-**kpod ps [GLOBAL OPTIONS]**
-
-**kpod ps [OPTIONS]**
-
-## OPTIONS
-
-**--all, -a**
-    Show all the containers; the default is to show only running containers
-
-**--no-trunc**
-    Display the extended information
-
-**--quiet, -q**
-    Print the numeric IDs of the containers only
-
-**--format**
-    Pretty-print containers to JSON or using a Go template
-
-Valid placeholders for the Go template are listed below:
-
-| **Placeholder** | **Description**                           |
-| --------------- | ----------------------------------------- |
-| .ID             | Container ID                              |
-| .Image          | Image ID/Name                             |
-| .Command        | Quoted command used                       |
-| .CreatedAt      | Creation time for container               |
-| .RunningFor     | Time elapsed since container was started  |
-| .Status         | Status of container                       |
-| .Ports          | Exposed ports                             |
-| .Size           | Size of container                         |
-| .Names          | Name of container                         |
-| .Labels         | All the labels assigned to the container  |
-| .Mounts         | Volumes mounted in the container          |
-
-
-**--size, -s**
-    Display the total file size
-
-**--last, -n**
-    Print the n last created containers (all states)
-
-**--latest, -l**
-    Show the latest container created (all states)
-
-**--namespace, --ns**
-    Display namespace information
-
-**--filter, -f**
-    Filter output based on conditions given
-
-Valid filters are listed below:
-
-| **Filter** | **Description**                                                      |
-| ---------- | -------------------------------------------------------------------- |
-| id         | [ID] Container's ID                                                   |
-| name       | [Name] Container's name                                               |
-| label      | [Key] or [Key=Value] Label assigned to a container                    |
-| exited     | [Int] Container's exit code                                           |
-| status     | [Status] Container's status, e.g. *running*, *stopped*                |
-| ancestor   | [ImageName] Image or descendant used to create container              |
-| before     | [ID] or [Name] Containers created before this container               |
-| since      | [ID] or [Name] Containers created since this container                |
-| volume     | [VolumeName] or [MountpointDestination] Volume mounted in container   |
-
-## EXAMPLES
-
-```
-sudo kpod ps -a
-CONTAINER ID   IMAGE          COMMAND          CREATED        STATUS                     PORTS      NAMES
-02f65160e14ca  redis:alpine   "redis-server"   19 hours ago   Exited (-1) 19 hours ago   6379/tcp   k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0
-69ed779d8ef9f  redis:alpine   "redis-server"   25 hours ago   Created                    6379/tcp   k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1
-```
-
-```
-sudo kpod ps -a -s
-CONTAINER ID   IMAGE          COMMAND          CREATED        STATUS                     PORTS      NAMES                                                                   SIZE
-02f65160e14ca  redis:alpine   "redis-server"   20 hours ago
Exited (-1) 20 hours ago 6379/tcp k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0 27.49 MB -69ed779d8ef9f redis:alpine "redis-server" 25 hours ago Created 6379/tcp k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1 27.49 MB -``` - -``` -sudo kpod ps -a --format "{{.ID}} {{.Image}} {{.Labels}} {{.Mounts}}" -02f65160e14ca redis:alpine tier=backend proc,tmpfs,devpts,shm,mqueue,sysfs,cgroup,/var/run/,/var/run/ -69ed779d8ef9f redis:alpine batch=no,type=small proc,tmpfs,devpts,shm,mqueue,sysfs,cgroup,/var/run/,/var/run/ -``` - -``` -sudo kpod ps --ns -a -CONTAINER ID NAMES PID CGROUP IPC MNT NET PIDNS USER UTS -3557d882a82e3 k8s_container2_podsandbox1_redhat.test.crio_redhat-test-crio_1 29910 4026531835 4026532585 4026532593 4026532508 4026532595 4026531837 4026532594 -09564cdae0bec k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1 29851 4026531835 4026532585 4026532590 4026532508 4026532592 4026531837 4026532591 -a31ebbee9cee7 k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0 29717 4026531835 4026532585 4026532587 4026532508 4026532589 4026531837 4026532588 -``` - -## ps -Print a list of containers - -## SEE ALSO -kpod(1), crio(8), crio.conf(5) - -## HISTORY -August 2017, Originally compiled by Urvashi Mohnani diff --git a/docs/kpod-pull.1.md b/docs/kpod-pull.1.md deleted file mode 100644 index 647f6f89..00000000 --- a/docs/kpod-pull.1.md +++ /dev/null @@ -1,59 +0,0 @@ -% kpod(1) kpod-pull - Simple tool to pull an image from a registry -% Urvashi Mohnani -# kpod-pull "1" "July 2017" "kpod" - -## NAME -kpod-pull - Pull an image from a registry - -## SYNOPSIS -**kpod pull** -**NAME[:TAG|@DIGEST]** -[**--help**|**-h**] - -## DESCRIPTION -Copies an image from a registry onto the local machine. **kpod pull** pulls an -image from Docker Hub if a registry is not specified in the command line argument. -If an image tag is not specified, **kpod pull** defaults to the image with the -**latest** tag (if it exists) and pulls it. **kpod pull** can also pull an image -using its digest **kpod pull [image]@[digest]**. **kpod pull** can be used to pull -images from archives and local storage using different transports. - -## imageID -Image stored in local container/storage - -## DESTINATION - - The DESTINATION is a location to store container images - The Image "DESTINATION" uses a "transport":"details" format. - - Multiple transports are supported: - - **dir:**_path_ - An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection. - - **docker://**_docker-reference_ - An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$HOME/.docker/config.json`, which is set e.g. using `(docker login)`. - - **docker-archive:**_path_[**:**_docker-reference_] - An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest. - - **docker-daemon:**_docker-reference_ - An image _docker-reference_ stored in the docker daemon internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID). - - **oci:**_path_**:**_tag_ - An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_. 
-
-  **ostree:**_image_[**@**_/absolute/repo/path_]
-  An image in a local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_.
-
-**kpod [GLOBAL OPTIONS]**
-
-**kpod pull [GLOBAL OPTIONS]**
-
-**kpod pull NAME[:TAG|@DIGEST]**
-
-## SEE ALSO
-kpod(1), crio(8), crio.conf(5)
-
-## HISTORY
-July 2017, Originally compiled by Urvashi Mohnani
diff --git a/docs/kpod-push.1.md b/docs/kpod-push.1.md
deleted file mode 100644
index 6970a172..00000000
--- a/docs/kpod-push.1.md
+++ /dev/null
@@ -1,98 +0,0 @@
-% kpod(1) kpod-push - Push an image from local storage to elsewhere
-% Dan Walsh
-# kpod-push "1" "June 2017" "kpod"
-
-## NAME
-kpod push - Push an image from local storage to elsewhere
-
-## SYNOPSIS
-**kpod** **push** [*options* [...]] **imageID** [**destination**]
-
-## DESCRIPTION
-Pushes an image from local storage to a specified destination, decompressing
-and recompressing layers as needed.
-
-## imageID
-Image stored in local containers/storage
-
-## DESTINATION
-
- The DESTINATION is a location to store container images.
- The image "DESTINATION" uses a "transport":"details" format.
-
- Multiple transports are supported:
-
-  **dir:**_path_
-  An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.
-
-  **docker://**_docker-reference_
-  An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$HOME/.docker/config.json`, which is set e.g. using `docker login`.
-
-  **docker-archive:**_path_[**:**_docker-reference_]
-  An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.
-
-  **docker-daemon:**_docker-reference_
-  An image _docker-reference_ stored in the docker daemon internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID).
-
-  **oci:**_path_**:**_tag_
-  An image _tag_ in a directory compliant with the "Open Container Image Layout Specification" at _path_.
-
-  **ostree:**_image_[**@**_/absolute/repo/path_]
-  An image in a local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_.
-
-## OPTIONS
-
-**--creds="CREDENTIALS"**
-
-Credentials (USERNAME:PASSWORD) to use for authenticating to a registry
-
-**--cert-dir="PATHNAME"**
-
-Pathname of a directory containing TLS certificates and keys
-
-**--disable-compression, -D**
-
-Don't compress copies of filesystem layers which will be pushed
-
-**--quiet, -q**
-
-When writing the output image, suppress progress output
-
-**--remove-signatures**
-
-Discard any pre-existing signatures in the image
-
-**--signature-policy="PATHNAME"**
-
-Pathname of a signature policy file to use. It is not recommended that this
-option be used, as the default behavior of using the system-wide default policy
-(frequently */etc/containers/policy.json*) is most often preferred
-
-**--sign-by="KEY"**
-
-Add a signature at the destination using the specified key
-
-**--tls-verify**
-
-Require HTTPS and verify certificates when contacting registries (default: true)
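Putting the authentication options together, a hedged sketch of pushing to a private registry; the registry address, the credentials, and the boolean flag syntax are illustrative, not confirmed by this page:

```bash
# Push to a local registry that presents a self-signed certificate;
# "testuser:testpassword" and "localhost:5000/myrepo:mytag" are examples.
kpod push --creds testuser:testpassword --tls-verify=false \
    imageID docker://localhost:5000/myrepo:mytag
```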
-
-## EXAMPLE
-
-This example copies the imageID image to a local directory in docker format.
-
- `# kpod push imageID dir:/path/to/image`
-
-This example copies the imageID image to a local directory in oci format.
-
- `# kpod push imageID oci:/path/to/layout`
-
-This example copies the imageID image to a container registry named registry.example.com.
-
- `# kpod push imageID docker://registry.example.com/repository:tag`
-
-This example copies the imageID image and puts it into the local docker container store.
-
- `# kpod push imageID docker-daemon:image:tag`
-
-## SEE ALSO
-kpod(1)
diff --git a/docs/kpod-rename.1.md b/docs/kpod-rename.1.md
deleted file mode 100644
index e59dbd99..00000000
--- a/docs/kpod-rename.1.md
+++ /dev/null
@@ -1,24 +0,0 @@
-% kpod(1) kpod-rename - Rename a container
-% Ryan Cole
-# kpod-rename "1" "March 2017" "kpod"
-
-## NAME
-kpod rename - Rename a container
-
-## SYNOPSIS
-**kpod** **rename** CONTAINER NEW-NAME
-
-## DESCRIPTION
-Rename a container. The container may be created, running, paused, or stopped.
-
-## EXAMPLE
-
-kpod rename redis-container webserver
-
-kpod rename a236b9a4 mycontainer
-
-## SEE ALSO
-kpod(1)
-
-## HISTORY
-March 2017, Originally compiled by Ryan Cole
diff --git a/docs/kpod-rm.1.md b/docs/kpod-rm.1.md
deleted file mode 100644
index 4e2c490a..00000000
--- a/docs/kpod-rm.1.md
+++ /dev/null
@@ -1,31 +0,0 @@
-% kpod(1) kpod-rm - Remove one or more containers
-% Ryan Cole
-# kpod-rm "1" "August 2017" "kpod"
-
-## NAME
-kpod rm - Remove one or more containers
-
-## SYNOPSIS
-**kpod** **rm** [*options* [...]] container
-
-## DESCRIPTION
-kpod rm will remove one or more containers from the host. The container name or ID can be used. This does not remove images. Running containers will not be removed without the -f option.
-
-## OPTIONS
-
-**--force, -f**
-
-Force the removal of a running container
-
-
-## EXAMPLE
-
-kpod rm mywebserver
-
-kpod rm -f 860a4b23
-
-## SEE ALSO
-kpod(1), kpod-rmi(1)
-
-## HISTORY
-August 2017, Originally compiled by Ryan Cole
diff --git a/docs/kpod-rmi.1.md b/docs/kpod-rmi.1.md
deleted file mode 100644
index 1566f961..00000000
--- a/docs/kpod-rmi.1.md
+++ /dev/null
@@ -1,32 +0,0 @@
-% kpod(1) kpod-rmi - Removes one or more images
-% Dan Walsh
-# kpod-rmi "1" "March 2017" "kpod"
-
-## NAME
-kpod rmi - Removes one or more images
-
-## SYNOPSIS
-**kpod** **rmi** **imageID [...]**
-
-## DESCRIPTION
-Removes one or more locally stored images.
-
-## OPTIONS
-
-**--force, -f**
-
-Executing this command will stop all containers that are using the image and remove them from the system
-
-## EXAMPLE
-
-kpod rmi imageID
-
-kpod rmi --force imageID
-
-kpod rmi imageID1 imageID2 imageID3
-
-## SEE ALSO
-kpod(1)
-
-## HISTORY
-March 2017, Originally compiled by Dan Walsh
diff --git a/docs/kpod-save.1.md b/docs/kpod-save.1.md
deleted file mode 100644
index dbcf96d0..00000000
--- a/docs/kpod-save.1.md
+++ /dev/null
@@ -1,60 +0,0 @@
-% kpod(1) kpod-save - Simple tool to save an image to an archive
-% Urvashi Mohnani
-# kpod-save "1" "July 2017" "kpod"
-
-## NAME
-kpod-save - Save an image to a docker-archive or oci-archive
-
-## SYNOPSIS
-**kpod save**
-**NAME[:TAG]**
-[**--quiet**|**-q**]
-[**--format**]
-[**--output**|**-o**]
-[**--help**|**-h**]
-
-## DESCRIPTION
-**kpod save** saves an image to either a **docker-archive** or an **oci-archive**
-on the local machine; the default is **docker-archive**.
-**kpod save** writes to STDOUT by default and can be redirected to a file using the **output** flag.
-The **quiet** flag suppresses the output when set.
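A docker-archive produced by **kpod save** is an ordinary tar file, so its contents can be listed with standard tools. A sketch with illustrative names; the manifest.json entry at the top level is what docker-archive layouts typically contain, stated here as an assumption:

```bash
# Save an image, then peek inside the resulting archive.
kpod save -o alpine.tar alpine:latest
tar -tf alpine.tar | head   # typically shows manifest.json plus layer blobs
```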
-
-**kpod [GLOBAL OPTIONS]**
-
-**kpod save [GLOBAL OPTIONS]**
-
-**kpod save [OPTIONS] NAME[:TAG]**
-
-## OPTIONS
-
-**--output, -o**
-Write to a file; the default is STDOUT
-
-**--format**
-Save the image to an **oci-archive**
-```
---format oci-archive
-```
-
-**--quiet, -q**
-Suppress the output
-
-## EXAMPLES
-
-```
-# kpod save --quiet -o alpine.tar alpine:2.6
-```
-
-```
-# kpod save > alpine-all.tar alpine
-```
-
-```
-# kpod save -o oci-alpine.tar --format oci-archive alpine
-```
-
-## SEE ALSO
-kpod(1), kpod-load(1), crio(8), crio.conf(5)
-
-## HISTORY
-July 2017, Originally compiled by Urvashi Mohnani
diff --git a/docs/kpod-stats.1.md b/docs/kpod-stats.1.md
deleted file mode 100644
index 1c1c0b35..00000000
--- a/docs/kpod-stats.1.md
+++ /dev/null
@@ -1,37 +0,0 @@
-% kpod(1) kpod-stats - Display a live stream of 1 or more containers' resource usage statistics
-% Ryan Cole
-# kpod-stats "1" "July 2017" "kpod"
-
-## NAME
-kpod-stats - Display a live stream of 1 or more containers' resource usage statistics
-
-## SYNOPSIS
-**kpod** **stats** [*options* [...]] [container]
-
-## DESCRIPTION
-Display a live stream of one or more containers' resource usage statistics
-
-## OPTIONS
-
-**--all, -a**
-
-Show all containers. Only running containers are shown by default
-
-**--no-stream**
-
-Disable streaming stats and only pull the first result; the default is false
-
-**--format="TEMPLATE"**
-
-Pretty-print container statistics using a Go template
-
-
-## EXAMPLE
-
-TODO
-
-## SEE ALSO
-kpod(1)
-
-## HISTORY
-July 2017, Originally compiled by Ryan Cole
diff --git a/docs/kpod-stop.1.md b/docs/kpod-stop.1.md
deleted file mode 100644
index 52a35815..00000000
--- a/docs/kpod-stop.1.md
+++ /dev/null
@@ -1,35 +0,0 @@
-% kpod(1) kpod-stop - Stop one or more containers
-% Brent Baude
-# kpod-stop "1" "September 2017" "kpod"
-
-## NAME
-kpod stop - Stop one or more containers
-
-## SYNOPSIS
-**kpod stop [OPTIONS] CONTAINER [...]**
-
-## DESCRIPTION
-Stops one or more containers. You may use container IDs or names as input. The **--timeout** switch
-allows you to specify the number of seconds to wait before forcibly stopping the container after the stop command
-is issued to the container. The default is 10 seconds.
-
-## OPTIONS
-
-**--timeout, -t**
-
-Timeout to wait before forcibly stopping the container
-
-
-## EXAMPLE
-
-kpod stop mywebserver
-
-kpod stop 860a4b23
-
-kpod stop --timeout 2 860a4b23
-
-## SEE ALSO
-kpod(1), kpod-rm(1)
-
-## HISTORY
-September 2017, Originally compiled by Brent Baude
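As the SEE ALSO reference suggests, stop is commonly followed by rm. A sketch of that cleanup sequence, built only from the commands documented here, with an illustrative container name:

```bash
# Stop the container (forcing after 2 seconds), then remove it.
kpod stop --timeout 2 mywebserver
kpod rm mywebserver
```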
diff --git a/docs/kpod-tag.1.md b/docs/kpod-tag.1.md
deleted file mode 100644
index b92b2eb1..00000000
--- a/docs/kpod-tag.1.md
+++ /dev/null
@@ -1,34 +0,0 @@
-% kpod(1) kpod-tag - Add tags to an image
-% Ryan Cole
-# kpod-tag "1" "July 2017" "kpod"
-
-## NAME
-kpod tag - Add an additional name to a local image
-
-## SYNOPSIS
-**kpod tag**
-[**--help**|**-h**]
-
-## DESCRIPTION
-Assigns a new alias to a local image. An alias refers to the entire image name, including the optional **TAG** after the ':'.
-
-**kpod [GLOBAL OPTIONS]**
-
-**kpod [GLOBAL OPTIONS] tag [OPTIONS]**
-
-## GLOBAL OPTIONS
-
-**--help, -h**
-  Print usage statement
-
-## EXAMPLES
-
-  kpod tag 0e3bbc2 fedora:latest
-
-  kpod tag httpd myregistryhost:5000/fedora/httpd:v2
-
-## SEE ALSO
-kpod(1), crio(8), crio.conf(5)
-
-## HISTORY
-July 2017, Originally compiled by Ryan Cole
diff --git a/docs/kpod-umount.1.md b/docs/kpod-umount.1.md
deleted file mode 100644
index 2ee03356..00000000
--- a/docs/kpod-umount.1.md
+++ /dev/null
@@ -1,19 +0,0 @@
-% kpod(1) kpod-umount - Unmount a working container's root filesystem.
-% Dan Walsh
-# kpod-umount "1" "July 2017" "kpod"
-
-## NAME
-kpod umount - Unmount a working container's root file system
-
-## SYNOPSIS
-**kpod** **umount** **containerID**
-
-## DESCRIPTION
-Unmounts the specified container's root file system.
-
-## EXAMPLE
-
-kpod umount containerID
-
-## SEE ALSO
-kpod(1), kpod-mount(1)
diff --git a/docs/kpod-unpause.1.md b/docs/kpod-unpause.1.md
deleted file mode 100644
index 52a81002..00000000
--- a/docs/kpod-unpause.1.md
+++ /dev/null
@@ -1,24 +0,0 @@
-% kpod(1) kpod-unpause - Unpause one or more containers
-% Dan Walsh
-# kpod-unpause "1" "September 2017" "kpod"
-
-## NAME
-kpod unpause - Unpause one or more containers
-
-## SYNOPSIS
-**kpod unpause [OPTIONS] CONTAINER [...]**
-
-## DESCRIPTION
-Unpauses the processes in one or more containers. You may use container IDs or names as input.
-
-## EXAMPLE
-
-kpod unpause mywebserver
-
-kpod unpause 860a4b23
-
-## SEE ALSO
-kpod(1), kpod-pause(1)
-
-## HISTORY
-September 2017, Originally compiled by Dan Walsh
diff --git a/docs/kpod-version.1.md b/docs/kpod-version.1.md
deleted file mode 100644
index cdc2c925..00000000
--- a/docs/kpod-version.1.md
+++ /dev/null
@@ -1,24 +0,0 @@
-% kpod(1) kpod-version - Simple tool to view version information
-% Urvashi Mohnani
-# kpod-version "1" "July 2017" "kpod"
-
-## NAME
-kpod-version - Display the KPOD Version Information
-
-## SYNOPSIS
-**kpod version**
-[**--help**|**-h**]
-
-## DESCRIPTION
-Shows the following information: Version, Go Version, Git Commit, Build Time,
-OS, and Architecture.
-
-**kpod [GLOBAL OPTIONS]**
-
-**kpod version**
-
-## SEE ALSO
-kpod(1), crio(8), crio.conf(5)
-
-## HISTORY
-July 2017, Originally compiled by Urvashi Mohnani
diff --git a/docs/kpod-wait.1.md b/docs/kpod-wait.1.md
deleted file mode 100644
index 290cdedf..00000000
--- a/docs/kpod-wait.1.md
+++ /dev/null
@@ -1,36 +0,0 @@
-% kpod(1) kpod-wait - Waits on a container
-% Brent Baude
-# kpod-wait "1" "September 2017" "kpod"
-
-## NAME
-kpod wait - Waits on one or more containers to stop and prints their exit codes
-
-## SYNOPSIS
-**kpod wait**
-[**--help**|**-h**]
-
-## DESCRIPTION
-Waits on one or more containers to stop. The container can be referred to by its
-name or ID. In the case of multiple containers, kpod will wait on each consecutively.
-After the container stops, the container's return code is printed.
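Because the return code is printed, **kpod wait** lends itself to scripting. A minimal sketch of that use, with an illustrative container name:

```bash
# Block until the container exits, then act on its exit status.
rc=$(kpod wait mywebserver)
echo "mywebserver exited with status ${rc}"
```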
-
-**kpod [GLOBAL OPTIONS] wait**
-
-## GLOBAL OPTIONS
-
-**--help, -h**
-  Print usage statement
-
-## EXAMPLES
-
-  kpod wait mywebserver
-
-  kpod wait 860a4b23
-
-  kpod wait mywebserver myftpserver
-
-## SEE ALSO
-kpod(1), crio(8), crio.conf(5)
-
-## HISTORY
-September 2017, Originally compiled by Brent Baude
diff --git a/docs/kpod.1.md b/docs/kpod.1.md
deleted file mode 100644
index 27de1ca5..00000000
--- a/docs/kpod.1.md
+++ /dev/null
@@ -1,137 +0,0 @@
-% kpod(1) kpod - Simple management tool for pods and images
-% Dan Walsh
-# kpod "1" "September 2016" "kpod"
-## NAME
-kpod - Simple management tool for containers and images
-
-## SYNOPSIS
-**kpod** [*options*] COMMAND
-
-## DESCRIPTION
-kpod is a simple, client-only tool to help with debugging issues when daemons
-such as the CRI runtime and the kubelet are not responding or failing. A shared API
-layer could be created to share code between the daemon and kpod. kpod does not
-require any daemon to be running. kpod utilizes the same underlying components that
-crio uses, i.e. containers/image, containers/storage, oci-runtime-tool/generate, and
-runc or any other OCI compatible runtime. kpod shares state with crio and so
-has the capability to debug pods/images created by crio.
-
-**kpod [GLOBAL OPTIONS]**
-
-## GLOBAL OPTIONS
-
-**--help, -h**
-  Print usage statement
-
-**--config value, -c**=**"config.file"**
-  Path of a config file detailing container server configuration options
-
-**--log-level**
-  Log messages above the specified level: debug, info, warn, error (default), fatal or panic
-
-**--root**=**value**
-  Path to the root directory in which data, including images, is stored
-
-**--runroot**=**value**
-  Path to the 'run directory' where all state information is stored
-
-**--runtime**=**value**
-  Path to the OCI compatible binary used to run containers
-
-**--storage-driver, -s**=**value**
-  Select which storage driver is used to manage storage of images and containers (default is overlay)
-
-**--storage-opt**=**value**
-  Used to pass an option to the storage driver
-
-**--version, -v**
-  Print the version
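The storage-related global options above can point kpod at a non-default store. A sketch of one invocation; the paths shown are the conventional containers/storage defaults, written out explicitly here for illustration:

```bash
# Run a subcommand against an explicit storage location with verbose logging.
kpod --root /var/lib/containers/storage \
     --runroot /var/run/containers/storage \
     --log-level debug images
```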
-
-## COMMANDS
-
-### diff
-Inspect changes on a container or image's filesystem
-
-### export
-Export container's filesystem contents as a tar archive
-
-### history
-Shows the history of an image
-
-### images
-List images in local storage
-
-### info
-Displays system information
-
-### inspect
-Display a container or image's configuration
-
-### kill
-Kill the main process in one or more containers
-
-### load
-Load an image from docker archive
-
-### login
-Login to a container registry
-
-### logout
-Logout of a container registry
-
-### logs
-Display the logs of a container
-
-### mount
-Mount a working container's root filesystem
-
-### pause
-Pause one or more containers
-
-### ps
-Prints out information about containers
-
-### pull
-Pull an image from a registry
-
-### push
-Push an image from local storage to elsewhere
-
-### rename
-Rename a container
-
-### rm
-Remove one or more containers
-
-### rmi
-Removes one or more locally stored images
-
-### save
-Save an image to docker-archive or oci
-
-### stats
-Display a live stream of one or more containers' resource usage statistics
-
-### stop
-Stops one or more running containers
-
-### tag
-Add an additional name to a local image
-
-### umount
-Unmount a working container's root file system
-
-### unpause
-Unpause one or more containers
-
-### version
-Display the version information
-
-### wait
-Wait on one or more containers to stop and print their exit codes
-
-## SEE ALSO
-crio(8), crio.conf(5)
-
-## HISTORY
-Dec 2016, Originally compiled by Dan Walsh
diff --git a/docs/play.png b/docs/play.png
deleted file mode 100644
index 9be2868f..00000000
Binary files a/docs/play.png and /dev/null differ
diff --git a/hack/libdm_installed.sh b/hack/libdm_installed.sh
new file mode 100755
index 00000000..f48c7e27
--- /dev/null
+++ b/hack/libdm_installed.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+cc -E - > /dev/null 2> /dev/null << EOF
+#include <libdevmapper.h>
+EOF
+if test $? -ne 0 ; then
+	echo exclude_graphdriver_devicemapper
+fi
diff --git a/hack/libdm_tag.sh b/hack/libdm_no_deferred_remove_tag.sh
similarity index 100%
rename from hack/libdm_tag.sh
rename to hack/libdm_no_deferred_remove_tag.sh
diff --git a/hack/ostree_tag.sh b/hack/ostree_tag.sh
new file mode 100755
index 00000000..89499c5e
--- /dev/null
+++ b/hack/ostree_tag.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+if ! pkg-config ostree-1 2> /dev/null ; then
+	echo containers_image_ostree_stub
+fi
diff --git a/hack/selinux_tag.sh b/hack/selinux_tag.sh
new file mode 100755
index 00000000..ff80fda0
--- /dev/null
+++ b/hack/selinux_tag.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+if pkg-config libselinux 2> /dev/null ; then
+	echo selinux
+fi
diff --git a/hack/validate/.validate b/hack/validate/.validate
new file mode 100644
index 00000000..9f05ff11
--- /dev/null
+++ b/hack/validate/.validate
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+set -e -o pipefail
+
+if [ -z "$VALIDATE_UPSTREAM" ]; then
+	# this is kind of an expensive check, so let's not do this twice if we
+	# are running more than one validate bundlescript
+
+	VALIDATE_REPO='https://github.com/kubernetes-incubator/cri-o.git'
+	VALIDATE_BRANCH='master'
+
+	VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
+
+	git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
+	VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
+
+	VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
+	VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
+
+	validate_diff() {
+		if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
+			git diff "$VALIDATE_COMMIT_DIFF" "$@"
+		fi
+	}
+	validate_log() {
+		if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
+			git log "$VALIDATE_COMMIT_LOG" "$@"
+		fi
+	}
+fi
diff --git a/hack/verify-gofmt.sh b/hack/verify-gofmt.sh
deleted file mode 100755
index 5577d1b9..00000000
--- a/hack/verify-gofmt.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-find_files() {
-  find . -not \( \
-      \( \
-        -wholename '*/vendor/*' \
-      \) -prune \
-    \) -name '*.go'
-}
-
-GOFMT="gofmt -s"
-bad_files=$(find_files | xargs $GOFMT -l)
-if [[ -n "${bad_files}" ]]; then
-  echo "!!! '$GOFMT' needs to be run on the following files: "
-  echo "${bad_files}"
-  exit 1
-fi
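The hack scripts above each print a Go build tag on stdout when a feature should be toggled off or on. A hedged sketch of how such probes are typically consumed; the shell variable and the ./cmd/kpod package path are assumptions based on paths seen elsewhere in this diff, not a confirmed Makefile rule:

```bash
# Collect the tags emitted by the probes and hand them to go build.
BUILDTAGS="$(hack/ostree_tag.sh) $(hack/selinux_tag.sh) $(hack/libdm_installed.sh)"
go build -tags "$BUILDTAGS" ./cmd/kpod
```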
diff --git a/hooks.md b/hooks.md
index 809dbdc2..cd3d0a40 100644
--- a/hooks.md
+++ b/hooks.md
@@ -53,6 +53,7 @@ type HookParams struct {
 	Cmds          []string `json:"cmds"`
 	Annotations   []string `json:"annotations"`
 	HasBindMounts bool     `json:"hasbindmounts"`
+	Arguments     []string `json:"arguments"`
 }
 ```
@@ -63,6 +64,7 @@ type HookParams struct {
 | cmds | List of regular expressions to match the command for running the container. If the command matches a regex, the hook will be run | Optional |
 | annotations | List of regular expressions to match against the Annotations in the container runtime spec; if an Annotation matches, the hook will be run | Optional |
 | hasbindmounts | Tells CRI-O to run the hook if the container has bind mounts from the host into the container | Optional |
+| arguments | Additional arguments to append to the hook command when executing it, for example --debug | Optional |
 
 ### Example
@@ -85,6 +87,7 @@ cat /etc/containers/oci/hooks.d/oci-systemd-hook.json
     "hasbindmounts": true,
     "hook": "/usr/libexec/oci/hooks.d/oci-umount",
-    "stages": [ "prestart" ]
+    "stages": [ "prestart" ],
+    "arguments": [ "--debug" ]
 }
 ```
-In this example the oci-umount will only be run during the prestart phase if the container has volume/bind mounts from the host into the container.
+In this example the oci-umount hook will only be run during the prestart phase if the container has volume/bind mounts from the host into the container; it will also execute oci-umount with the --debug argument.
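To confirm that a hook configuration carries the new arguments field, the JSON can be queried directly. A sketch assuming jq is installed, using the example file path from hooks.md:

```bash
# Print the extra arguments the hook will be executed with.
jq -r '.arguments[]' /etc/containers/oci/hooks.d/oci-systemd-hook.json
# --debug
```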
diff --git a/kpod-images.json b/kpod-images.json
deleted file mode 100644
index a87f1d5b..00000000
--- a/kpod-images.json
+++ /dev/null
@@ -1,18446 +0,0 @@
-[asciinema terminal recording, 18,446 lines of JSON keystroke and escape-sequence data omitted: a capture demonstrating `kpod images` with the --no-trunc, -q, --digests, and --filter/--format options]
- "On branch kpod-format-table\r\nnothing to commit, working tree clean\r\n" - ], - [ - 0.000574, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.036636, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001716, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 9.3e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 3.6e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 8.1e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m " - ], - [ - 4.9e-05, - "\u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 4.6e-05, - "\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.75012, - "s" - ], - [ - 0.599165, - "\b \b" - ], - [ - 0.19198, - "m" - ], - [ - 0.064457, - "\bma" - ], - [ - 0.127947, - "k" - ], - [ - 0.136116, - "e" - ], - [ - 0.07172, - " " - ], - [ - 0.096215, - "k" - ], - [ - 0.071462, - "p" - ], - [ - 0.080589, - "o" - ], - [ - 0.10344, - "d" - ], - [ - 0.152208, - "\u001b[?1l\u001b>" - ], - [ - 5.1e-05, - "\u001b[?2004l\r\r\n" - ], - [ - 0.003042, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 6.729784, - "make: 'kpod' is up to date.\r\n" - ], - [ - 0.000291, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.026264, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table \u001b[39m \u001b[33m7s\u001b[39m\r\n" - ], - [ - 0.001087, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 7.6e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.9e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 7.5e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000122, - "\u001b[?1h\u001b=" - ], - [ - 2.8e-05, - "\u001b[?2004h" - ], - [ - 13.163278, - "s" - ], - [ - 0.168391, - "\bsu" - ], - [ - 0.127831, - "d" - ], - [ - 0.10395, - "o" - ], - [ - 0.223816, - " " - ], - [ - 0.12801, - "k" - ], - [ - 0.615488, - "\b \b" - ], - [ - 0.176737, - "m" - ], - [ - 0.040224, - "a" - ], - [ - 0.216001, - "k" - ], - [ - 0.120125, - "e" - ], - [ - 0.095559, - " " - ], - [ - 0.176055, - "i" - ], - [ - 0.087364, - "n" - ], - [ - 0.064466, - "s" - ], - [ - 0.096026, - "t" - ], - [ - 0.128305, - "a" - ], - [ - 0.159057, - "l" - ], - [ - 0.152629, - "l" - ], - [ - 1.088301, - "\u001b[?1l\u001b>" - ], - [ - 0.000115, - "\u001b[?2004l\r\r\n" - ], - [ - 0.008092, - "\u001b]2;sudo make install\u0007\u001b]1;make\u0007" - ], - [ - 0.035813, - "mkdir -p \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/_output/src/github.com/kubernetes-incubator\"\r\n" - ], - [ - 0.003636, - "ln -s \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\" \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/_output/src/github.com/kubernetes-incubator\"\r\n" - ], - [ - 0.001734, - "touch \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/_output/.gopathok\"\r\n" - ], - [ - 0.001583, - "install -D -m 755 crio /usr/local/bin/crio\r\n" - ], - [ - 0.053752, - "install -D -m 755 crioctl /usr/local/bin/crioctl\r\n" - ], - [ - 0.032266, - "install -D -m 755 kpod /usr/local/bin/kpod\r\n" - ], - [ - 0.046161, - "install -D -m 755 conmon/conmon 
/usr/local/libexec/crio/conmon\r\n" - ], - [ - 0.003465, - "install -D -m 755 pause/pause /usr/local/libexec/crio/pause\r\n" - ], - [ - 0.004506, - "install -d -m 755 /usr/local/share/man/man1\r\n" - ], - [ - 0.000924, - "install -d -m 755 /usr/local/share/man/man5\r\n" - ], - [ - 0.000709, - "install -d -m 755 /usr/local/share/man/man8\r\n" - ], - [ - 0.000731, - "install -m 644 docs/kpod-diff.1 docs/kpod-push.1 docs/kpod-cp.1 docs/kpod.1 docs/kpod-export.1 docs/kpod-load.1 docs/kpod-logs.1 docs/kpod-images.1 docs/kpod-umount.1 docs/kpod-save.1 docs/kpod-mount.1 docs/kpod-info.1 docs/kpod-inspect.1 docs/kpod-history.1 docs/kpod-pull.1 docs/kpod-rmi.1 docs/kpod-version.1 docs/kpod-tag.1 -t /usr/local/share/man/man1\r\n" - ], - [ - 0.014923, - "install -m 644 docs/crio.conf.5 -t /usr/local/share/man/man5\r\n" - ], - [ - 0.006695, - "install -m 644 docs/crio.8 -t /usr/local/share/man/man8\r\n" - ], - [ - 0.005468, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.030073, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.00096, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 8.4e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.4e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 8.2e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m " - ], - [ - 2.2e-05, - "\u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 3.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.5e-05, - "\u001b[?2004h" - ], - [ - 1.588728, - "s" - ], - [ - 0.158798, - "\bsu" - ], - [ - 0.12764, - "d" - ], - [ - 0.088209, - "o" - ], - [ - 0.160458, - " " - ], - [ - 0.152138, - "." - ], - [ - 0.200633, - "/" - ], - [ - 0.343621, - "\b \b" - ], - [ - 0.142987, - "\b \b" - ], - [ - 0.336517, - "k" - ], - [ - 0.192818, - "p" - ], - [ - 0.079464, - "o" - ], - [ - 0.167625, - "d" - ], - [ - 0.160306, - " " - ], - [ - 0.49613, - "i" - ], - [ - 0.120488, - "m" - ], - [ - 0.247645, - "a" - ], - [ - 0.160515, - "g" - ], - [ - 0.087028, - "e" - ], - [ - 0.072479, - "s" - ], - [ - 0.08742, - " " - ], - [ - 0.120873, - "-" - ], - [ - 0.127862, - "-" - ], - [ - 0.119898, - "f" - ], - [ - 0.12007, - "o" - ], - [ - 0.127375, - "r" - ], - [ - 0.111896, - "m" - ], - [ - 0.088551, - "a" - ], - [ - 0.119081, - "t" - ], - [ - 0.144899, - " " - ], - [ - 0.40945, - "{" - ], - [ - 0.598946, - "\b \b" - ], - [ - 0.497455, - "\"" - ], - [ - 0.498444, - "t" - ], - [ - 0.107574, - "a" - ], - [ - 0.143907, - "b" - ], - [ - 0.136371, - "l" - ], - [ - 0.119485, - "e" - ], - [ - 0.080146, - " " - ], - [ - 0.336807, - "{" - ], - [ - 0.231791, - "{" - ], - [ - 0.271322, - "." 
- ], - [ - 0.319863, - "I" - ], - [ - 0.160571, - "D" - ], - [ - 0.415578, - "}" - ], - [ - 0.424342, - "}" - ], - [ - 0.264361, - "\"" - ], - [ - 0.431581, - "\u001b[?1l\u001b>" - ], - [ - 6.9e-05, - "\u001b[?2004l" - ], - [ - 5.4e-05, - "\r\r\n" - ], - [ - 0.004691, - "\u001b]2;sudo kpod images --format \"table {{.ID}}\"\u0007\u001b]1;kpod\u0007" - ], - [ - 0.096877, - "IMAGE ID \r\n3edb693215a22336c352ba66d101fafda7e2ecbad1ecf2137e1c495e461d8f23\r\n1adfcf922a991e2d59a98dd2b5adc813b590261737d77c3ec7ae23e4f927d6bb\r\n524b9482e987a953b81321580372c07c3c765ce7c336445797428658384c6812\r\n9518288ded9bd43a055a4022d84c440b3ac16981f943bb099b60e0984e9e23d2\r\n" - ], - [ - 0.003109, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.024093, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001338, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.00015, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.2e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000103, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 5.9e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.6e-05, - "\u001b[?2004h" - ], - [ - 5.653254, - "w" - ], - [ - 0.123493, - "\bwh" - ], - [ - 0.080798, - "i" - ], - [ - 0.127546, - "c" - ], - [ - 0.112514, - "h" - ], - [ - 0.079037, - " " - ], - [ - 0.129196, - "k" - ], - [ - 0.135422, - "p" - ], - [ - 0.076117, - "o" - ], - [ - 0.116658, - "d" - ], - [ - 0.144849, - "\u001b[?1l\u001b>\u001b[?2004l\r\r\n" - ], - [ - 0.002681, - "\u001b]2;( alias; declare -f; ) | /usr/bin/which --tty-only --read-alias --show-tilde\u0007\u001b]1;which\u0007" - ], - [ - 0.004242, - "/usr/local/bin/kpod\r\n" - ], - [ - 0.000126, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.026103, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.002394, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000493, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000187, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.9e-05, - "\u001b[?1h\u001b=" - ], - [ - 6.2e-05, - "\u001b[?2004h" - ], - [ - 86.650128, - "m" - ], - [ - 0.161011, - "\bma" - ], - [ - 0.655411, - "k" - ], - [ - 0.103692, - "e" - ], - [ - 0.063223, - " " - ], - [ - 0.09703, - "k" - ], - [ - 0.119343, - "p" - ], - [ - 0.049093, - "o" - ], - [ - 0.303802, - "d" - ], - [ - 0.144347, - "\u001b[?1l\u001b>" - ], - [ - 4.4e-05, - "\u001b[?2004l\r\r\n" - ], - [ - 0.004019, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 6.725858, - "go build -ldflags '-X main.gitCommit=99495909 -X main.buildInfo=1502916060' -tags \"selinux seccomp \" -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod\r\n" - ], - [ - 4.213403, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.018496, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table* \u001b[39m \u001b[33m11s\u001b[39m\r\n" - ], - [ - 0.001197, - 
"\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000111, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000112, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 5.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 8e-05, - "\u001b[?2004h" - ], - [ - 0.768399, - "s" - ], - [ - 0.111451, - "\bsu" - ], - [ - 0.096019, - "d" - ], - [ - 0.103589, - "o" - ], - [ - 0.104726, - " " - ], - [ - 0.079095, - "k" - ], - [ - 0.177292, - "p" - ], - [ - 0.358902, - "\b \b" - ], - [ - 0.12001, - "\b \b" - ], - [ - 0.167745, - "m" - ], - [ - 0.096101, - "a" - ], - [ - 0.096699, - "k" - ], - [ - 0.088325, - "e" - ], - [ - 0.087158, - " " - ], - [ - 0.064768, - "i" - ], - [ - 0.071949, - "n" - ], - [ - 0.048283, - "s" - ], - [ - 0.095575, - "t" - ], - [ - 0.080741, - "a" - ], - [ - 0.135503, - "l" - ], - [ - 0.111733, - "l" - ], - [ - 0.135263, - "\u001b[?1l\u001b>" - ], - [ - 0.000111, - "\u001b[?2004l\r\r\n" - ], - [ - 0.001633, - "\u001b]2;sudo make install\u0007\u001b]1;make\u0007" - ], - [ - 3.04395, - "install -D -m 755 crio /usr/local/bin/crio\r\n" - ], - [ - 0.05693, - "install -D -m 755 crioctl /usr/local/bin/crioctl\r\n" - ], - [ - 0.033176, - "install -D -m 755 kpod /usr/local/bin/kpod\r\n" - ], - [ - 0.04205, - "install -D -m 755 conmon/conmon /usr/local/libexec/crio/conmon\r\n" - ], - [ - 0.002903, - "install -D -m 755 pause/pause /usr/local/libexec/crio/pause\r\n" - ], - [ - 0.004348, - "install -d -m 755 /usr/local/share/man/man1\r\n" - ], - [ - 0.000355, - "install -d -m 755 /usr/local/share/man/man5\r\n" - ], - [ - 0.000615, - "install -d -m 755 /usr/local/share/man/man8\r\n" - ], - [ - 0.000636, - "install -m 644 docs/kpod-diff.1 docs/kpod-push.1 docs/kpod-cp.1 docs/kpod.1 docs/kpod-export.1 docs/kpod-load.1 docs/kpod-logs.1 docs/kpod-images.1 docs/kpod-umount.1 docs/kpod-save.1 docs/kpod-mount.1 docs/kpod-info.1 docs/kpod-inspect.1 docs/kpod-history.1 docs/kpod-pull.1 docs/kpod-rmi.1 docs/kpod-version.1 docs/kpod-tag.1 -t /usr/local/share/man/man1\r\n" - ], - [ - 0.011201, - "install -m 644 docs/crio.conf.5 -t /usr/local/share/man/man5\r\n" - ], - [ - 0.004078, - "install -m 644 docs/crio.8 -t /usr/local/share/man/man8\r\n" - ], - [ - 0.00848, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.020721, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001114, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 9.5e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.1e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000165, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 388.817771, - "v" - ], - [ - 0.052535, - "\bvi" - ], - [ - 0.099467, - " " - ], - [ - 0.312442, - "m" - ], - [ - 0.159718, - "c" - ], - [ - 0.207542, - "\b \b" - ], - [ - 0.111893, - "\b \b" - ], - [ - 0.088207, - "c" - ], - [ - 0.120601, - "m" - ], - [ - 0.122793, - "d\u001b[1m/\u001b[0m" - ], - [ - 0.06091, - "\b\u001b[0m/k" - ], - [ - 0.12778, - "pod\u001b[1m/\u001b[0m" - ], - [ - 0.339925, - "\b\u001b[0m/r" - ], - [ - 0.124315, - "m" 
- ], - [ - 0.119998, - "i.go\u001b[1m \u001b[0m" - ], - [ - 0.071475, - "\b\u001b[0m i" - ], - [ - 0.511445, - "\b \b" - ], - [ - 0.145079, - "\b" - ], - [ - 0.195924, - "\u001b[?1l\u001b>" - ], - [ - 0.000365, - "\u001b[?2004l\r\r\n" - ], - [ - 0.008801, - "\u001b]2;vim cmd/kpod/rmi.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.134848, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000609, - "\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"cmd/kpod/rmi.go\"" - ], - [ - 0.000144, - " 123L, 3096C" - ], - [ - 0.008903, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.001667, - "\u001b[1;1H\u001b[96m\u001b[47m 1 \u001b[m\u001b[93m\u001b[107m\u001b[32mpackage\u001b[m\u001b[93m\u001b[107m main\r\n\u001b[96m\u001b[47m 2 \r\n 3 \u001b[m\u001b[93m\u001b[107m\u001b[32mimport\u001b[m\u001b[93m\u001b[107m (\r\n\u001b[96m\u001b[47m 4 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"fmt\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 5 \r\n 6 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"github.com/containers/storage\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 7 \u001b[m\u001b[93m\u001b[107m libkpodimage \u001b[36m\"github.com/kubernetes-incubator/cri-o/libkpod/image\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 8 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"github.com/pkg/errors\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 9 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"github.com/urfave/cli\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 10 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 11 \r\n 12 \u001b[m\u001b[93m\u001b[107m\u001b[32mvar\u001b[m\u001b[93m\u001b[107m (\r\n\u001b[96m\u001b[47m 13 \u001b[m\u001b[93m\u001b[107m rmiDescription = \u001b[36m\"removes one or more locally stored images.\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 14 \u001b[m\u001b[93m\u001b[107m rmiFlags\u001b[7C= []cli.Flag{\r\n\u001b[96m\u001b[47m 15 \u001b[m\u001b[93m\u001b[107m\u001b[8Ccli.BoolFlag{\r\n\u001b[96m\u001b[47m 16 \u001b[m\u001b[93m\u001b[107m\u001b[12CName: \u001b[36m\"force, f\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 17 \u001b[m\u001b[93m\u001b[107m\u001b[12CUsage: \u001b[36m\"force removal of the image\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[" - ], - [ - 2e-05, - "96m\u001b[47m 18 \u001b[m\u001b[93m\u001b[107m\u001b[8C},\r\n\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m rmiCommand = cli.Command{\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[8CName:\u001b[8C\u001b[36m\"rmi\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[8CUsage:\u001b[7C\u001b[36m\"removes one or more images from local storage\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[8CDescription: rmiDescription,\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8CAction: rmiCmd,\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m\u001b[8CArgsUsage: \u001b[36m\"IMAGE-NAME-OR-ID [...]\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m\u001b[8CFlags:\u001b[7CrmiFlags,\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 28 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 29 \r\n 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m rmiCmd(c *cli.Context) 
\u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 31 \r\n 32 \u001b[m\u001b[93m\u001b[107m force := \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m c.IsSet(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m\u001b[8Cforce = c.Bool(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 35 \u001b[m\u001b[93m" - ], - [ - 0.028983, - "\u001b[107m }\r\n\u001b[96m\u001b[47m 36 \r\n 37 \u001b[m\u001b[93m\u001b[107m args := c.Args()\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(args) == \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Errorf(\u001b[36m\"image name or ID must be specified\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 41 \r\n 42 \u001b[m\u001b[93m\u001b[107m config, err := getConfig(c)\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Could not get config\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m store, err := getStore(config)\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 50 \u001b[m\u001b[93m\u001b[107m\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m" - ], - [ - 2.8e-05, - "\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H kpod-format-table \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;32H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;50H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                             \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m   1%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m  1\u001b[m\u001b[93m" - ], - [ - 0.009152, - 
"\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1  \u001b[1;5H\u001b[?12l\u001b[?25h\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 0.418144, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K\u001b[52;1H/\u001b[?2004h" - ], - [ - 6.9e-05, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.594911, - "m\u001b[?25l" - ], - [ - 0.011367, - "\u001b[1;13H\u001b[7m\u001b[91mm\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[51;2H\u001b[1m\u001b[38;5;22m\u001b[48;5;148mCOMMND \u001b[m\u001b[93m\u001b[107m\u001b[200C\u001b[38;5;22m\u001b[48;5;252m9\r\n\u001b[m\u001b[93m\u001b[107m/m\u001b[?12l\u001b[?25h" - ], - [ - 0.22557, - "u\u001b[?25l" - ], - [ - 0.014952, - "\u001b[1;13Hm\u001b[39;52H\u001b[7m\u001b[91mmu\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  32%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m39\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:48\r\n\u001b[m\u001b[93m\u001b[107m/mu\u001b[?12l\u001b[?25h" - ], - [ - 0.081033, - "s\u001b[?25l" - ], - [ - 0.011335, - "\u001b[39;54H\u001b[7m\u001b[91ms\u001b[52;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.076678, - "\u001b[27m\u001b[m\u001b[93m\u001b[107mt\u001b[?25l" - ], - [ - 0.012142, - "\u001b[39;55H\u001b[7m\u001b[91mt\u001b[52;6H\u001b[?12l\u001b[?25h" - ], - [ - 0.180332, - "\u001b[?25l" - ], - [ - 0.008175, - "\u001b[39;56H \u001b[52;7H\u001b[?12l\u001b[?25h" - ], - [ - 0.087385, - "\u001b[27m\u001b[m\u001b[93m\u001b[107mf\u001b[?25l" - ], - [ - 0.011478, - "\u001b[1;50r\u001b[1;1H\u001b[19M\u001b[1;52r\u001b[20;52H\u001b[36mmust \u001b[m\u001b[93m\u001b[107m\u001b[32;1H\u001b[96m\u001b[47m 51 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m _, id := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m args {\r\n\u001b[96m\u001b[47m 52 \u001b[m\u001b[93m\u001b[107m\u001b[8Cimage, err := libkpodimage.FindImage(store, id)\r\n\u001b[96m\u001b[47m 53 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 54 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"could not get image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 55 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 56 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m image != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 57 \u001b[m\u001b[93m\u001b[107m\u001b[12CctrIDs, err := runningContainers(image, store)\r\n\u001b[96m\u001b[47m 58 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 59 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"error getting running containers for image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 60 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 61" - ], - [ - 2.8e-05, - " \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m 
\u001b[32mlen\u001b[m\u001b[93m\u001b[107m(ctrIDs) > \u001b[36m0\u001b[m\u001b[93m\u001b[107m && \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(image.Names) <= \u001b[36m1\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 62 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m force {\r\n\u001b[96m\u001b[47m 63 \u001b[m\u001b[93m\u001b[107m\u001b[20CremoveContainers(ctrIDs, store)\r\n\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m\u001b[16C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mfor\u001b[m\u001b[93m\u001b[107m ctrID := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\r\n\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m\u001b[24C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m fmt.Errorf(\u001b[36m\"Could not remove image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m (\u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[91mmust f\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[36morce) - container \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m is using its reference image\"\u001b[m\u001b[93m\u001b[107m, id, ctrID)\r\n\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m\u001b[16C}\r\n\u001b[96m\u001b[47m 69 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  54%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:71\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K\u001b[52;1H/" - ], - [ - 2.2e-05, - "must f\u001b[?12l\u001b[?25h" - ], - [ - 0.084161, - "o\u001b[?25l" - ], - [ - 0.012305, - "\u001b[47;81H\u001b[7m\u001b[91mo\u001b[52;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.116387, - "\u001b[27m\u001b[m\u001b[93m\u001b[107mr\u001b[?25l" - ], - [ - 0.011734, - "\u001b[47;82H\u001b[7m\u001b[91mr\u001b[52;10H\u001b[?12l\u001b[?25h" - ], - [ - 0.255362, - "\u001b[27m\u001b[m\u001b[93m\u001b[107mc\u001b[?25l" - ], - [ - 0.011191, - "\u001b[47;83H\u001b[7m\u001b[91mc\u001b[52;11H\u001b[?12l\u001b[?25h" - ], - [ - 0.0573, - "\u001b[27m\u001b[m\u001b[93m\u001b[107me\u001b[?25l" - ], - [ - 0.013394, - "\u001b[47;84H\u001b[7m\u001b[91me\u001b[52;12H\u001b[?12l\u001b[?25h" - ], - [ - 0.202815, - "\r\u001b[?25l" - ], - [ - 0.008924, - "\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[47;75H\u001b[7m\u001b[33mmust force\u001b[m\u001b[93m\u001b[107m\u001b[51;2H\u001b[1m\u001b[38;5;22m\u001b[48;5;148mNORMAL \u001b[47;75H\u001b[?12l\u001b[?25h" - ], - [ - 1.635195, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[45;51H\u001b[1m\u001b[31m\u001b[106m{\u001b[47;25H}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 70 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[96m// If the user supplied an ID, we cannot delete the image if it is referred to by multiple tags\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:2\u001b[47;25H\u001b[?12l\u001b[?25h" - ], - [ - 0.536079, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[45;51H{\u001b[47;25H}\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:7\u001b[46;75H\u001b[?12l\u001b[?25h" - ], - [ - 0.422312, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[45;51H\u001b[1m\u001b[31m\u001b[106m{\u001b[47;25H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:47\u001b[45;51H\u001b[?12l\u001b[?25h" - ], - [ - 7.590339, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[47;25H}\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  54%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:71\u001b[46;75H\u001b[?12l\u001b[?25h" - ], - [ - 0.524344, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[45;51H\u001b[1m\u001b[31m\u001b[106m{\u001b[47;25H}\u001b[m\u001b[93m\u001b[107m\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:2\u001b[47;25H\u001b[?12l\u001b[?25h" - ], - [ - 0.384796, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[43;28H\u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\u001b[44;51H{\u001b[46;25H}\u001b[47;21H\u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 71 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m libkpodimage.MatchesID(image.ID, id) {\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  55%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[47;21H\u001b[?12l\u001b[?25h" - ], - [ - 1.34616, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;28H{\u001b[44;51H\u001b[1m\u001b[31m\u001b[106m{\u001b[46;25H}\u001b[m\u001b[93m\u001b[107m\u001b[47;21H}\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  54%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:21\u001b[46;25H\u001b[?12l\u001b[?25h" - ], - [ - 6.08698, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[44;51H{\u001b[46;25H}\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:7\u001b[45;75H\u001b[?12l\u001b[?25h" - ], - [ - 0.759914, - "\u001b[51;210H2\u001b[45;76H" - ], - [ - 0.494462, - "\u001b[51;210H3\u001b[45;77H" - ], - [ - 0.033092, - "\u001b[51;210H4\u001b[45;78H" - ], - [ - 0.027331, - "\u001b[51;210H5\u001b[45;79H" - ], - [ - 0.032653, - "\u001b[51;210H6\u001b[45;80H" - ], - [ - 0.028729, - "\u001b[51;210H7\u001b[45;81H" - ], - [ - 0.034268, - "\u001b[51;210H8\u001b[45;82H" - ], - [ - 0.022711, - "\u001b[51;210H9\u001b[45;83H" - ], - [ - 0.033818, - "\u001b[51;209H80\u001b[45;84H" - ], - [ - 0.033041, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[45;74H\u001b[1m\u001b[31m\u001b[106m(\u001b[10C)\u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;252m1\u001b[45;85H\u001b[?12l\u001b[?25h" - ], - [ - 0.029548, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[45;74H\u001b[36m(\u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[33mm\u001b[m\u001b[93m\u001b[107m\u001b[9C\u001b[36m) \u001b[m\u001b[93m\u001b[107m\u001b[51;210H\u001b[38;5;22m\u001b[48;5;252m2\u001b[45;86H\u001b[?12l\u001b[?25h" - ], - [ - 0.02919, - "\u001b[51;210H3\u001b[45;87H" - ], - [ - 0.030088, - "\u001b[51;210H4\u001b[45;88H" - ], - [ - 0.032142, - "\u001b[51;210H5\u001b[45;89H" - ], - [ - 0.03027, - "\u001b[51;210H6\u001b[45;90H" - ], - [ - 0.030009, - 
"\u001b[51;210H7\u001b[45;91H" - ], - [ - 0.031925, - "\u001b[51;210H8\u001b[45;92H" - ], - [ - 0.027725, - "\u001b[51;210H9\u001b[45;93H" - ], - [ - 0.031953, - "\u001b[51;209H90\u001b[45;94H" - ], - [ - 0.029383, - "\u001b[51;210H1\u001b[45;95H" - ], - [ - 0.030754, - "\u001b[51;210H2\u001b[45;96H" - ], - [ - 0.03042, - "\u001b[51;210H3\u001b[45;97H" - ], - [ - 0.030576, - "\u001b[51;210H4\u001b[45;98H" - ], - [ - 0.031217, - "\u001b[51;210H5\u001b[45;99H" - ], - [ - 0.031561, - "\u001b[51;210H6\u001b[45;100H" - ], - [ - 0.136981, - "\u001b[51;210H7\u001b[45;101H" - ], - [ - 8.584841, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[44;51H\u001b[1m\u001b[31m\u001b[106m{\u001b[46;25H}\u001b[m\u001b[93m\u001b[107m\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:21\u001b[46;25H\u001b[?12l\u001b[?25h" - ], - [ - 0.501453, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;28H\u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\u001b[44;51H{\u001b[46;25H}\u001b[47;21H\u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  55%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[47;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.032681, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[39;61H\u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\u001b[42;28H{\u001b[46;21H}\u001b[47;17H\u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 72 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(image.Names) > \u001b[36m1\u001b[m\u001b[93m\u001b[107m && !force {\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  56%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[47;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.026312, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[38;61H{\u001b[46;17H}\u001b[50;1H\u001b[96m\u001b[47m 73 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m fmt.Errorf(\u001b[36m\"unable to delete \u001b[m\u001b[93m\u001b[107m\u001b[31m%s\u001b[m\u001b[93m\u001b[107m\u001b[36m (\u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[33mmust force\u001b[m\u001b[93m\u001b[107m\u001b[36m) - image is referred to in multiple tags\"\u001b[m\u001b[93m\u001b[107m, image.ID)\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  57%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m70\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:97\u001b[47;101H\u001b[?12l\u001b[?25h" - ], - [ - 0.027588, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[50;1H\u001b[96m\u001b[47m 74 \u001b[m\u001b[93m\u001b[107m\u001b[16C}\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  58%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:53\u001b[47;57H\u001b[?12l\u001b[?25h" - ], - [ - 0.034204, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[47;55H\u001b[1m\u001b[31m\u001b[106m{\u001b[49;21H}\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 75 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[96m// 
If it is forced, we have to untag the image so that it can be deleted\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  59%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:51\u001b[47;55H\u001b[?12l\u001b[?25h" - ], - [ - 0.025136, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[46;55H{\u001b[48;21H}\r\n\r\n\u001b[96m\u001b[47m 76 \u001b[m\u001b[93m\u001b[107m\u001b[16Cimage.Names = image.Names[:\u001b[36m0\u001b[m\u001b[93m\u001b[107m]\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:97\u001b[47;101H\u001b[?12l\u001b[?25h" - ], - [ - 0.037748, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[45;55H\u001b[1m\u001b[31m\u001b[106m{\u001b[47;21H}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 77 \u001b[m\u001b[93m\u001b[107m\u001b[12C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  60%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[47;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.029845, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[44;55H{\u001b[46;21H}\u001b[50;1H\u001b[96m\u001b[47m 78 \u001b[m\u001b[93m\u001b[107m\u001b[16Cname, err2 := libkpodimage.UntagImage(store, image, id)\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  61%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:88\u001b[47;92H\u001b[?12l\u001b[?25h" - ], - [ - 0.029474, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[47;46H\u001b[1m\u001b[31m\u001b[106m[\u001b[2C]\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 79 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err2 != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  62%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:45\u001b[47;49H\u001b[?12l\u001b[?25h" - ], - [ - 0.027846, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[46;46H[:\u001b[1C]\u001b[50;1H\u001b[96m\u001b[47m 80 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  63%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:20\u001b[47;24H\u001b[?12l\u001b[?25h" - ], - [ - 0.028156, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[47;58H\u001b[1m\u001b[31m\u001b[106m(\u001b[16C)\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 81 \u001b[m\u001b[93m\u001b[107m\u001b[16C}\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:71\u001b[47;75H\u001b[?12l\u001b[?25h" - ], - [ - 0.036773, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[46;58H(s\u001b[15C)\u001b[47;36H\u001b[1m\u001b[31m\u001b[106m{\u001b[49;21H}\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 82 
\u001b[m\u001b[93m\u001b[107m\u001b[16Cfmt.Printf(\u001b[36m\"untagged: \u001b[m\u001b[93m\u001b[107m\u001b[31m%s\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, name)\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  64%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:32\u001b[47;36H\u001b[?12l\u001b[?25h" - ], - [ - 0.029719, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[46;36H{\u001b[48;21H}\r\n\r\n\u001b[96m\u001b[47m 83 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  65%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m80\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:30\u001b[47;34H\u001b[?12l\u001b[?25h" - ], - [ - 0.032707, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[45;36H\u001b[1m\u001b[31m\u001b[106m{\u001b[47;21H}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 84 \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  66%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[47;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.026852, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[44;36H{\u001b[46;21H}\u001b[47;31H\u001b[1m\u001b[31m\u001b[106m(\u001b[20C)\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 85 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(image.Names) > \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  67%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:48\u001b[47;52H\u001b[?12l\u001b[?25h" - ], - [ - 0.026473, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[41;24H\u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\u001b[46;31H(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[19C)\u001b[47;17H\u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 86 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mcontinue\u001b[m\u001b[93m\u001b[107m\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[47;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.032368, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[40;24H{\u001b[46;17H}\u001b[50;1H\u001b[96m\u001b[47m 87 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  68%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1 \u001b[47;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.03189, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[47;41H\u001b[1m\u001b[31m\u001b[106m{\u001b[49;17H}\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 88 \u001b[m\u001b[93m\u001b[107m\u001b[12Cid, err := libkpodimage.RemoveImage(image, store)\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  
69%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:37\u001b[47;41H\u001b[?12l\u001b[?25h" - ], - [ - 0.036978, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[46;41H{\u001b[48;17H}\r\n\r\n\u001b[96m\u001b[47m 89 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  70%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:24\u001b[47;28H\u001b[?12l\u001b[?25h" - ], - [ - 0.031976, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[45;41H\u001b[1m\u001b[31m\u001b[106m{\u001b[47;17H}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 90 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  71%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[47;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.034896, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[44;41H{\u001b[46;17H}\u001b[47;52H\u001b[1m\u001b[31m\u001b[106m(\u001b[12C)\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 91 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  72%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:61\u001b[47;65H\u001b[?12l\u001b[?25h" - ], - [ - 0.017603, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[46;52H(i\u001b[11C)\u001b[47;31H\u001b[1m\u001b[31m\u001b[106m{\u001b[49;17H}\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 92 \u001b[m\u001b[93m\u001b[107m\u001b[12Cfmt.Printf(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[31m%s\\n\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:27\u001b[47;31H\u001b[?12l\u001b[?25h" - ], - [ - 0.040503, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[46;31H{\u001b[48;17H}\r\n\r\n\u001b[96m\u001b[47m 93 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  73%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m90\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:26\u001b[47;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.028582, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[45;31H\u001b[1m\u001b[31m\u001b[106m{\u001b[47;17H}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 94 \u001b[m\u001b[93m\u001b[107m }\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  74%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[47;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.031483, - "\u001b[?25l\u001b[1;50r\u001b[m\u001b[93m\u001b[107m\u001b[50;1H\r\n\u001b[1;52r\u001b[44;31H{\u001b[46;17H}\u001b[47;27H\u001b[1m\u001b[31m\u001b[106m(\u001b[10C)\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m 95 
\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  75%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:34\u001b[47;38H\u001b[?12l\u001b[?25h" - ], - [ - 0.161023, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[44;31H\u001b[1m\u001b[31m\u001b[106m{\u001b[46;17H}\u001b[m\u001b[93m\u001b[107m\u001b[47;27H(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[9C)\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  74%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[46;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.499146, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[44;31H{\u001b[46;17H}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  73%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:26\u001b[45;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.027118, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[44;31H\u001b[1m\u001b[31m\u001b[106m{\u001b[46;17H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  72%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m89\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:27\u001b[44;31H\u001b[?12l\u001b[?25h" - ], - [ - 0.033789, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;52H\u001b[1m\u001b[31m\u001b[106m(\u001b[12C)\u001b[m\u001b[93m\u001b[107m\u001b[44;31H{\u001b[46;17H}\u001b[51;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:61\u001b[43;65H\u001b[?12l\u001b[?25h" - ], - [ - 0.026661, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[40;41H\u001b[1m\u001b[31m\u001b[106m{\u001b[42;17H}\u001b[m\u001b[93m\u001b[107m\u001b[43;52H(i\u001b[11C)\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  71%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[42;17H\u001b[?12l\u001b[?25h" - ], - [ - 0.032548, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[40;41H{\u001b[42;17H}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  70%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:24\u001b[41;28H\u001b[?12l\u001b[?25h" - ], - [ - 0.029278, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[40;41H\u001b[1m\u001b[31m\u001b[106m{\u001b[42;17H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  69%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:37\u001b[40;41H\u001b[?12l\u001b[?25h" - ], - [ - 0.026258, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[42;17H}\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  68%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1 \u001b[39;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.034793, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[32;24H\u001b[1m\u001b[31m\u001b[106m{\u001b[38;17H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  67%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13\u001b[38;17H\u001b[?12l\u001b[?25h" - ], 
[Remainder of the embedded asciinema terminal recording: a JSON array of [delay, ANSI-escape-data] frames. The raw escape sequences are not reproduced here; the recoverable session content is summarized below.]

Session transcript (branch kpod-format-table, in ~/Development/Go/src/github.com/kubernetes-incubator/cri-o):

  - In vim, cmd/kpod/rmi.go is edited: the error returned by rmiCmd is reworded from
        "Could not remove image %q (must force) - container %q is using its reference image"
    to
        "Could not remove image %q (must force) - one or more containers are using its reference image"
    and the file is written (":wq", 123 lines, 3107 characters).

  - make kpod
        go build -ldflags '-X main.gitCommit=99495909 -X main.buildInfo=1502916531' -tags "selinux seccomp " -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod

  - sudo make install
        install -D -m 755 crio /usr/local/bin/crio
        install -D -m 755 crioctl /usr/local/bin/crioctl
        install -D -m 755 kpod /usr/local/bin/kpod
        install -D -m 755 conmon/conmon /usr/local/libexec/crio/conmon
        install -D -m 755 pause/pause /usr/local/libexec/crio/pause
        (kpod man pages into /usr/local/share/man/man1, crio.conf.5 into man5, crio.8 into man8)

  - git reset HEAD cmd/kpod/rmi.go
        Unstaged changes after reset:
        M  cmd/kpod/images.go
        M  cmd/kpod/rmi.go

  - cmd/kpod/rmi.go is reopened in vim, the message edit is briefly revisited, then discarded with ":q!".

  - git status reports both files modified; git diff shows two one-line changes:
        cmd/kpod/images.go, in genImagesFormat:
        -  format += "{{ .DIGEST | printf \"%-71s \"}} "
        +  format += "{{ .Digest | printf \"%-71s \"}} "
        cmd/kpod/rmi.go, in rmiCmd: the error-message rewording shown above.

  - git reset --hard HEAD
        HEAD is now at 99495909 Make kpod images use text/template by default

  - vim cmd/kpod/images.go (203 lines, 4796 characters) is opened, showing the
    imageOutputParams struct (ID, Name, Digest, CreatedAt, Size with json tags),
    the toGeneric() helper, and the reflection-based headerMap() that feeds
    formats.StdoutTemplate{Output: toGeneric(imageOutput), Template: outputFormat,
    Fields: imageOutput[0].headerMap()}.
\u001b[m\u001b[93m\u001b[107m\u001b[8Cvalue := key\r\n\u001b[96m\u001b[47m197 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m value == \u001b[36m\"ID\"\u001b[m\u001b[93m\u001b[107m || value == \u001b[36m\"Name\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m198 \u001b[m\u001b[93m\u001b[107m\u001b[12Cvalue = \u001b[36m\"Image\"\u001b[m\u001b[93m\u001b[107m + value\r\n\u001b[96m\u001b[47m199 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m200 \u001b[m\u001b[93m\u001b[107m\u001b[8Cvalues[key] = fmt.Sprintf(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[31m%s\u001b[m\u001b[93m\u001b[107m\u001b[36m \"\u001b[m\u001b[93m\u001b[107m, strings.ToUpper(splitCamelCase(value)))\r\n\u001b[96m\u001b[47m201 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m202 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m values\r\n\u001b[96m\u001b[47m203 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[53;12H kpod-format-table \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[53;32H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mimages.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[53;53H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m               " - ], - [ - 0.01263, - "                                                                                                           \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  87%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m177\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5  \u001b[26;9H\u001b[?12l\u001b[?25h" - ], - [ - 2.3e-05, - "\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 0.07219, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[K\u001b[54;1H/" - ], - [ - 6.3e-05, - "\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.376099, - "D" - ], - [ - 8.8e-05, - "\u001b[?25l" - ], - [ - 0.008132, - "\u001b[26;26H\u001b[7m\u001b[91mD\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[53;2H\u001b[1m\u001b[38;5;22m\u001b[48;5;148mCOMMND \u001b[m\u001b[93m\u001b[107m\u001b[200C\u001b[38;5;22m\u001b[48;5;252m22\r\n\u001b[m\u001b[93m\u001b[107m/D" - ], - [ - 6.3e-05, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.127597, - "I" - ], - [ - 6e-05, - "\u001b[?25l" - ], - [ - 0.001462, - "\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[1;1H\u001b[96m\u001b[47m 98 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 99 
\u001b[m\u001b[93m\u001b[107m } \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m100 \u001b[m\u001b[93m\u001b[107m\u001b[8Cparams = \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m101 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m102 \r\n103 \u001b[m\u001b[93m\u001b[107m imageList, err := libkpodimage.GetImagesMatchingFilter(store, params, name)\r\n\u001b[96m\u001b[47m104 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m105 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"could not get list of images matching filter\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m106 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m107 \r\n108 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m outputImages(store, imageList, truncate, digests, quiet, outputFormat, noheading)\r\n\u001b[96m\u001b[47m109 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m110 \r\n111 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m genImagesFormat(quiet, truncate, digests \u001b[33mbool\u001b[m\u001b[93m\u001b[107m) (format \u001b[33mstring\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m112 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m quiet {\r\n\u001b[96m\u001b[" - ], - [ - 1.7e-05, - "47m113 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36m\"{{.ID}}\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m114 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m115 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m truncate {\r\n\u001b[96m\u001b[47m116 \u001b[m\u001b[93m\u001b[107m\u001b[8Cformat = \u001b[36m\"table {{ .ID | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-20.12s\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m }} \"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m117 \u001b[m\u001b[93m\u001b[107m } \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m118 \u001b[m\u001b[93m\u001b[107m\u001b[8Cformat = \u001b[36m\"table {{ .ID | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-64s\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m }} \"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m119 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m120 \u001b[m\u001b[93m\u001b[107m format += \u001b[36m\"{{ .Name | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-56s\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m }} \"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m121 \r\n122 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m digests {\r\n\u001b[96m\u001b[47m123 \u001b[m\u001b[93m\u001b[107m\u001b[8Cformat += \u001b[36m\"{{ .\u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[91mDI\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[36mGEST | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m}} \"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m124 \u001b[m\u001b[93m\u001b[107m }\r\n" - ], - [ - 0.006692, - "\u001b[96m\u001b[47m125 \r\n126 \u001b[m\u001b[93m\u001b[107m format += \u001b[36m\"{{ .CreatedAt | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-22s\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m }} {{.Size}}\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m127 
\u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m128 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m129 \r\n130 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m outputImages(store storage.Store, images []storage.Image, truncate, digests, quiet \u001b[33mbool\u001b[m\u001b[93m\u001b[107m, outputFormat \u001b[33mstring\u001b[m\u001b[93m\u001b[107m, noheading \u001b[33mbool\u001b[m\u001b[93m\u001b[107m) \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m131 \u001b[m\u001b[93m\u001b[107m imageOutput := []imageOutputParams{}\r\n\u001b[96m\u001b[47m132 \r\n133 \u001b[m\u001b[93m\u001b[107m lastID := \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m134 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m _, img := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m images {\r\n\u001b[96m\u001b[47m135 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m quiet && lastID == img.ID {\r\n\u001b[96m\u001b[47m136 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mcontinue\u001b[m\u001b[93m\u001b[107m \u001b[96m// quiet should not show the same ID multiple times\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m137 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m138 \u001b[m\u001b[" - ], - [ - 3e-05, - "93m\u001b[107m\u001b[8CcreatedTime := img.Created\r\n\u001b[96m\u001b[47m139 \r\n140 \u001b[m\u001b[93m\u001b[107m\u001b[8Cname := \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m141 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(img.Names) > \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m142 \u001b[m\u001b[93m\u001b[107m\u001b[12Cname = img.Names[\u001b[36m0\u001b[m\u001b[93m\u001b[107m]\r\n\u001b[96m\u001b[47m143 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m144 \r\n145 \u001b[m\u001b[93m\u001b[107m\u001b[8Cinfo, imageDigest, size, _ := libkpodimage.InfoAndDigestAndSize(store, img)\r\n\u001b[96m\u001b[47m146 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m info != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m147 \u001b[m\u001b[93m\u001b[107m\u001b[12CcreatedTime = info.Created\r\n\u001b[96m\u001b[47m148 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m149 \u001b[m\u001b[93m\u001b[107m\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m COMMND \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[53;12H kpod-format-table \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[53;32H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mimages.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[53;53H" - ], - [ - 6.1e-05, - " \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                          \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m 
\u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  61%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m123\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:24 \u001b[m\u001b[93m\u001b[107m\u001b[54;1H/DI\u001b[?12l\u001b[?25h" - ], - [ - 0.104778, - "G\u001b[?25l" - ], - [ - 0.004039, - "\u001b[26;30H\u001b[7m\u001b[91mG\u001b[54;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.202824, - "\u001b[27m\u001b[m\u001b[93m\u001b[107mE\u001b[?25l" - ], - [ - 0.005173, - "\u001b[26;31H\u001b[7m\u001b[91mE\u001b[54;6H\u001b[?12l\u001b[?25h" - ], - [ - 0.122869, - "\u001b[27m\u001b[m\u001b[93m\u001b[107mS\u001b[?25l" - ], - [ - 0.005278, - "\u001b[26;32H\u001b[7m\u001b[91mS\u001b[54;7H\u001b[?12l\u001b[?25h" - ], - [ - 0.090915, - "\u001b[27m\u001b[m\u001b[93m\u001b[107mT\u001b[?25l" - ], - [ - 0.005887, - "\u001b[26;33H\u001b[7m\u001b[91mT\u001b[54;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.154865, - "\r\u001b[?25l\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[31msearch hit BOTTOM, continuing at TOP" - ], - [ - 0.001958, - "\u001b[m\u001b[93m\u001b[107m\u001b[26;28H\u001b[7m\u001b[33mDIGEST" - ], - [ - 0.0027, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;2H\u001b[1m\u001b[38;5;22m\u001b[48;5;148mNORMAL \u001b[26;28H\u001b[?12l\u001b[?25h" - ], - [ - 0.713596, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m5\u001b[26;29H" - ], - [ - 0.44085, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36mDGEST | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m}} \"\u001b[m\u001b[93m\u001b[107m\u001b[26;57H\u001b[K\u001b[53;51H\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[53;55H \u001b[26;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.166708, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mEST | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m}} \"\u001b[m\u001b[93m\u001b[107m\u001b[26;56H\u001b[K\u001b[26;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.169515, - "\u001b[?25l\u001b[36mST | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m}} \"\u001b[m\u001b[93m\u001b[107m\u001b[26;55H\u001b[K\u001b[26;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.174197, - "\u001b[?25l\u001b[36mT | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m}} \"\u001b[m\u001b[93m\u001b[107m\u001b[26;54H\u001b[K\u001b[26;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.401145, - "\u001b[?25l\u001b[36m | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m}} \"\u001b[m\u001b[93m\u001b[107m\u001b[26;53H\u001b[K\u001b[26;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.176906, - 
"\u001b[?25l\u001b[54;1H\u001b[34m--\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[31ma\u001b[m\u001b[93m\u001b[107m\b\u001b[34m INSERT\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[31mt\u001b[m\u001b[93m\u001b[107m\b\u001b[34m --\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[31mO\u001b[m\u001b[93m\u001b[107m\u001b[54;13H\u001b[K" - ], - [ - 0.042519, - "\u001b[53;1H\u001b[1m\u001b[38;5;23m\u001b[48;5;231m INSERT \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[m\u001b[93m\u001b[107m\u001b[53;9H\u001b[38;5;231m\u001b[48;5;31m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m\u001b[53;12H kpod-format-table \u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;31m\u001b[53;32H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;31mimages.go\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[38;5;220m\u001b[48;5;31m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[m\u001b[93m\u001b[107m\u001b[53;54H\u001b[38;5;31m\u001b[48;5;24m\u001b[53;55H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;24m                                                                                                                        \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;117m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[" - ], - [ - 3.8e-05, - "107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;186m\u001b[48;5;31m  61%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;24m\u001b[48;5;117m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m123\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;117m:25 \u001b[26;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.163842, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mg | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m} \"\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[26;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.172451, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36me | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m} \"\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[26;31H\u001b[?12l\u001b[?25h" - ], - [ - 0.331326, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m}} \"\u001b[m\u001b[93m\u001b[107m\u001b[26;54H\u001b[K\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[26;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.13664, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[36m | printf 
\u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[36m}} \"\u001b[m\u001b[93m\u001b[107m\u001b[26;53H\u001b[K\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[26;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.177579, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mi | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m} \"\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[26;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.174307, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mg | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m} \"\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[26;31H\u001b[?12l\u001b[?25h" - ], - [ - 0.131684, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36me | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m} \"\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[26;32H\u001b[?12l\u001b[?25h" - ], - [ - 0.086181, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36ms | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m} \"\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m9\u001b[26;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.037497, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[36mt | printf \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"%-71s\u001b[m\u001b[93m\u001b[107m\u001b[36m \u001b[m\u001b[93m\u001b[107m\u001b[31m\\\"\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[36m} \"\u001b[m\u001b[93m\u001b[107m\u001b[53;209H\u001b[38;5;22m\u001b[48;5;117m30\u001b[26;34H\u001b[?12l\u001b[?25h" - ], - [ - 0.329898, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[K" - ], - [ - 0.005721, - "\u001b[53;1H\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;31m\u001b[m\u001b[93m\u001b[107m\u001b[53;9H\u001b[38;5;148m\u001b[48;5;240m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[53;12H kpod-format-table \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[53;32H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mimages.go\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[m\u001b[93m\u001b[107m\u001b[53;54H\u001b[38;5;240m\u001b[48;5;236m\u001b[53;55H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                        \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;247m\u001b[48;5;236m 
utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;247m\u001b[48;5;236m" - ], - [ - 2.8e-05, - " go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  61%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m123\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;252m:29 \u001b[26;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.282806, - "\u001b[?25l\u001b[54;1H\u001b[m\u001b[93m\u001b[107m:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.320108, - "w" - ], - [ - 0.00016, - "\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.09557, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.080523, - "\r\u001b[?25l\u001b[?2004l" - ], - [ - 0.015569, - "\"cmd/kpod/images.go\"" - ], - [ - 0.005889, - " 203L, 4796C written" - ], - [ - 0.015946, - "\r\r\r\n\u001b[39;49m\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.003478, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.034812, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table* \u001b[39m \u001b[33m7s\u001b[39m\r\n" - ], - [ - 0.00239, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 9.2e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.3e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000169, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D\u001b[?1h\u001b=" - ], - [ - 3.6e-05, - "\u001b[?2004h" - ], - [ - 0.906176, - "g" - ], - [ - 0.11065, - "\bgi" - ], - [ - 0.064415, - "t" - ], - [ - 0.143259, - " " - ], - [ - 0.080894, - "c" - ], - [ - 0.048032, - "o" - ], - [ - 0.103375, - "m" - ], - [ - 0.337522, - "i" - ], - [ - 0.287202, - "\b \b" - ], - [ - 0.191689, - "m" - ], - [ - 0.127496, - "i" - ], - [ - 0.096447, - "t" - ], - [ - 0.055022, - " " - ], - [ - 0.128991, - "-" - ], - [ - 0.136055, - "a" - ], - [ - 0.119988, - " " - ], - [ - 0.135881, - "-" - ], - [ - 0.159957, - "-" - ], - [ - 0.120583, - "a" - ], - [ - 0.087513, - "m" - ], - [ - 0.111928, - "e" - ], - [ - 0.104033, - "n" - ], - [ - 0.088352, - "d" - ], - [ - 0.127489, - "\u001b[?1l\u001b>" - ], - [ - 0.0001, - "\u001b[?2004l\r\r\n" - ], - [ - 0.002622, - "\u001b]2;git commit -a --amend\u0007\u001b]1;git\u0007" - ], - [ - 0.026245, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000861, - "\u001b[1;54r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[54;1H\"~/Development/Go/src/github.com/kubernetes-incubator/cri-o/.git/COMMIT_EDITMSG\"" - ], - [ - 0.000158, - " 15L, 455C" - ], - [ - 0.00016, - "\u001b[1;1HMake kpod images use text/template by default\r\n\r\nSigned-off-by: Ryan Cole \r\n\r\n# Please enter the commit message for your changes. 
Lines starting\r\n# with '#' will be ignored, and an empty message aborts the commit.\r\n#\r\n# Date: Tue Aug 15 22:23:40 2017 -0400\r\n#\r\n# On branch kpod-format-table\r\n# Changes to be committed:\r\n#\u001b[7Cmodified: cmd/kpod/formats/formats.go\r\n#\u001b[7Cmodified: cmd/kpod/formats/templates.go\r\n#\u001b[7Cmodified: cmd/kpod/images.go\r\n#\r\n\u001b[94m~ \u001b[17;1H~ \u001b[18;1H~ " - ], - [ - 2e-05, - " \u001b[19;1H~ \u001b[20;1H~ \u001b[21;1H~ \u001b[22;1H~ \u001b[23;1H~ " - ], - [ - 5.1e-05, - " \u001b[24;1H~ \u001b[25;1H~ \u001b[26;1H~ \u001b[27;1H~ " - ], - [ - 1.5e-05, - " \u001b[28;1H~ \u001b[29;1H~ \u001b[30;1H~ \u001b[31;1H~ \u001b[32;1H~ " - ], - [ - 5e-05, - " \u001b[33;1H~ \u001b[34;1H~ \u001b[35;1H~ \u001b[36;1H~ \u001b[37;1H~ " - ], - [ - 1.5e-05, - " \u001b[38;1H~ \u001b[39;1H~ \u001b[40;1H~ \u001b[41;1H~ " - ], - [ - 7.1e-05, - " \u001b[42;1H~ \u001b[43;1H~ \u001b[44;1H~ \u001b[45;1H~ \u001b[46;1H~ " - ], - [ - 1.6e-05, - " \u001b[47;1H~ \u001b[48;1H~ \u001b[49;1H~ \u001b[50;1H~ \u001b[51;1H~ " - ], - [ - 1.6e-05, - " \u001b[52;1H~ \u001b[53;1H~ \u001b[1;1H\u001b[?12l\u001b[?25h" - ], - [ - 0.297423, - "\u001b[?25l\u001b[m\u001b[54;1H\u001b[K\u001b[54;1H:\u001b[?2004h" - ], - [ - 0.000258, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.143378, - "w" - ], - [ - 0.072145, - "q" - ], - [ - 0.095864, - "\r\u001b[?25l\u001b[?2004l" - ], - [ - 6e-05, - "\".git/COMMIT_EDITMSG\"" - ], - [ - 0.0116, - " 15L, 455C written\r\r\r\n\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.002475, - "[kpod-format-table ba07bfb9] Make kpod images use text/template by default\r\n Date: Tue Aug 15 22:23:40 2017 -0400\r\n" - ], - [ - 0.000465, - " 3 files changed, 36 insertions(+), 61 deletions(-)\r\n" - ], - [ - 0.000403, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.026137, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001205, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000102, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000115, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 2.9e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 2e-05, - "\u001b[?2004h" - ], - [ - 14.9661, - "g" - ], - [ - 0.136155, - "\bgi" - ], - [ - 0.079381, - "t" - ], - [ - 0.080921, - " " - ], - [ - 0.047606, - "p" - ], - [ - 0.09957, - "u" - ], - [ - 0.13683, - "s" - ], - [ - 0.083539, - "h" - ], - [ - 0.111539, - " " - ], - [ - 0.11253, - "-" - ], - [ - 0.159311, - "f" - ], - [ - 0.132344, - " " - ], - [ - 0.132514, - "o" - ], - [ - 0.144066, - "r" - ], - [ - 0.143563, - "i" - ], - [ - 0.155326, - "g" - ], - [ - 0.128262, - "i" - ], - [ - 0.112524, - "n" - ], - [ - 0.035327, - " " - ], - [ - 0.184803, - "k" - ], - [ - 0.120956, - "pod-" - ], - [ - 0.139448, - "f" - ], - [ - 0.127973, - "o" - ], - [ - 0.134993, - "rmat-table\u001b[1m \u001b[0m" - ], - [ - 0.600821, - "\b\u001b[0m \b\u001b[?1l\u001b>\u001b[?2004l\r\r\n" - ], - [ - 0.002119, - "\u001b]2;git push -f origin kpod-format-table\u0007\u001b]1;git\u0007" - ], - [ - 1.180562, - "Counting objects: 8, done.\r\n" - ], - [ - 0.000119, - "Delta compression using up to 4 threads.\r\n" - ], - [ - 4.1e-05, - "Compressing objects: 12% (1/8) \r" - ], - [ - 9.2e-05, - 
"Compressing objects: 25% (2/8) \r" - ], - [ - 7.4e-05, - "Compressing objects: 37% (3/8) \r" - ], - [ - 0.000131, - "Compressing objects: 50% (4/8) \r" - ], - [ - 1.4e-05, - "Compressing objects: 62% (5/8) \r" - ], - [ - 3.3e-05, - "Compressing objects: 75% (6/8) \r" - ], - [ - 2.2e-05, - "Compressing objects: 87% (7/8) \r" - ], - [ - 3.6e-05, - "Compressing objects: 100% (8/8) \r" - ], - [ - 4e-05, - "Compressing objects: 100% (8/8), done.\r\n" - ], - [ - 0.000193, - "Writing objects: 12% (1/8) \r" - ], - [ - 4.3e-05, - "Writing objects: 25% (2/8) \r" - ], - [ - 4.2e-05, - "Writing objects: 37% (3/8) \r" - ], - [ - 4.4e-05, - "Writing objects: 50% (4/8) \r" - ], - [ - 6.2e-05, - "Writing objects: 62% (5/8) \r" - ], - [ - 4e-05, - "Writing objects: 75% (6/8) \r" - ], - [ - 3.9e-05, - "Writing objects: 87% (7/8) \r" - ], - [ - 7.2e-05, - "Writing objects: 100% (8/8) \r" - ], - [ - 2.3e-05, - "Writing objects: 100% (8/8), 1.02 KiB | 1.02 MiB/s, done.\r\nTotal 8 (delta 6), reused 0 (delta 0)\r\n" - ], - [ - 0.089402, - "remote: Resolving deltas: 0% (0/6) \u001b[K\r" - ], - [ - 0.036283, - "remote: Resolving deltas: 16% (1/6) \u001b[K\rremote: Resolving deltas: 33% (2/6) \u001b[K\rremote: Resolving deltas: 50% (3/6) \u001b[K\rremote: Resolving deltas: 66% (4/6) \u001b[K\rremote: Resolving deltas: 83% (5/6) \u001b[K\rremote: Resolving deltas: 100% (6/6) \u001b[K\rremote: Resolving deltas: 100% (6/6), completed with 6 local objects.\u001b[K\r\n" - ], - [ - 1.955619, - "To github.com:14rcole/cri-o\r\n + 99495909...ba07bfb9 kpod-format-table -> kpod-format-table (forced update" - ], - [ - 7.3e-05, - ")\r\n" - ], - [ - 0.001606, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.029351, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-format-table \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001026, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 8.9e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.1e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000163, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 2e-05, - "\u001b[?2004h" - ], - [ - 330.270968, - "v" - ], - [ - 0.119343, - "\bvi" - ], - [ - 0.103605, - " " - ], - [ - 0.088799, - "t" - ], - [ - 0.080179, - "e" - ], - [ - 0.198952, - "st\u001b[1m/\u001b[0m" - ], - [ - 0.353296, - "\b\u001b[0m \b" - ], - [ - 0.499203, - "\b \b" - ], - [ - 0.02974, - "\b \b" - ], - [ - 0.031437, - "\b \b" - ], - [ - 0.029177, - "\b \b" - ], - [ - 0.030945, - "\b" - ], - [ - 0.029122, - "\b\bv \b" - ], - [ - 0.029942, - "\b \b" - ], - [ - 0.136039, - "g" - ], - [ - 0.071775, - "\bgi" - ], - [ - 0.112776, - "t" - ], - [ - 0.063244, - " " - ], - [ - 0.144384, - "c" - ], - [ - 0.055871, - "h" - ], - [ - 0.144103, - "e" - ], - [ - 0.080216, - "c" - ], - [ - 0.07079, - "k" - ], - [ - 0.112014, - "o" - ], - [ - 0.064547, - "u" - ], - [ - 0.080139, - "t" - ], - [ - 0.095908, - " " - ], - [ - 0.104077, - "k" - ], - [ - 0.138478, - "pod-" - ], - [ - 0.117535, - "s" - ], - [ - 0.128338, - "t" - ], - [ - 0.259432, - "a" - ], - [ - 0.522987, - "\u0007" - ], - [ - 0.000167, - "\r\r\n" - ], - [ - 8.2e-05, - "\u001b[J\u001b[0mkpod-start \u001b[Jkpod-stats\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\u001b[39m\r\u001b[2Cgit checkout 
kpod-sta\u001b[K\u001b[187C\u001b[90m\u001b[39m\u001b[39m\u001b[187D" - ], - [ - 0.225726, - "t" - ], - [ - 0.239644, - "s\u001b[1m \u001b[0m" - ], - [ - 0.327637, - "\b\u001b[0m \b" - ], - [ - 0.000136, - "\u001b[?1l\u001b>" - ], - [ - 0.000445, - "\u001b[?2004l\r\r\n\u001b[J" - ], - [ - 0.004978, - "\u001b]2;git checkout kpod-stats\u0007\u001b]1;git\u0007" - ], - [ - 0.041451, - "Switched to branch 'kpod-stats'\r\n" - ], - [ - 6.6e-05, - "Your branch is up-to-date with 'origin/kpod-stats'.\r\n" - ], - [ - 0.000587, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.035434, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001297, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 7.4e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.1e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000329, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.2e-05, - "\u001b[?2004h" - ], - [ - 2.822658, - "v" - ], - [ - 0.107877, - "\bvi" - ], - [ - 0.28439, - " " - ], - [ - 0.083967, - "l" - ], - [ - 0.064538, - "i" - ], - [ - 0.100026, - "b" - ], - [ - 0.167219, - "kpod\u001b[1m/\u001b[0m" - ], - [ - 0.572222, - "\b\u001b[0m/c" - ], - [ - 0.09244, - "o" - ], - [ - 0.118682, - "\u0007" - ], - [ - 0.000448, - "\r\r\n" - ], - [ - 0.000186, - "\u001b[J\u001b[38;5;33mcommon\u001b[0m/ \u001b[Jconfig.go \u001b[Jcontainer_data.go \u001b[Jcontainer.go \u001b[Jcontainer_server.go\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\u001b[39m\r\u001b[2Cvi libkpod/co\u001b[K\u001b[195C\u001b[90m\u001b[39m\u001b[39m\u001b[195D" - ], - [ - 0.632962, - "t" - ], - [ - 0.209392, - "\u0007\r\r\n\u001b[J\u001b[A\u001b[16C" - ], - [ - 0.694432, - "\b \b" - ], - [ - 0.159374, - "n" - ], - [ - 0.086549, - "\u0007" - ], - [ - 0.000211, - "\r\r\n\u001b[J" - ], - [ - 9.9e-05, - "\u001b[J\u001b[0mconfig.go \u001b[Jcontainer_data.go \u001b[Jcontainer.go \u001b[Jcontainer_server.go\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\u001b[39m\r\u001b[2Cvi libkpod/con\u001b[K\u001b[194C\u001b[90m\u001b[39m\u001b[39m\u001b[194D" - ], - [ - 0.481713, - "t" - ], - [ - 0.111125, - "a" - ], - [ - 0.180888, - "\r\r\n\u001b[J\u001b[A\u001b[18Ciner" - ], - [ - 0.492502, - "_" - ], - [ - 0.206735, - "s" - ], - [ - 0.024184, - "e" - ], - [ - 0.325519, - "rver.go\u001b[1m \u001b[0m" - ], - [ - 0.3706, - "\b\u001b[0m \b" - ], - [ - 0.0002, - "\u001b[?1l\u001b>\u001b[?2004l\r\r\n\u001b[J" - ], - [ - 0.00447, - "\u001b]2;vim libkpod/container_server.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.135276, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000662, - "\u001b[1;54r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[54;1H\"libkpod/container_server.go\"" - ], - [ - 0.00017, - " 684L, 20532C" - ], - [ - 0.008173, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.025784, - "\u001b[1;1H\u001b[96m\u001b[47m586 \u001b[m\u001b[93m\u001b[107m \u001b[32mdefer\u001b[m\u001b[93m\u001b[107m c.stateLock.Unlock()\r\n\u001b[96m\u001b[47m587 \u001b[m\u001b[93m\u001b[107m sbID := ctr.Sandbox()\r\n\u001b[96m\u001b[47m588 \u001b[m\u001b[93m\u001b[107m sb := c.state.sandboxes[sbID]\r\n\u001b[96m\u001b[47m589 
\u001b[m\u001b[93m\u001b[107m sb.RemoveContainer(ctr)\r\n\u001b[96m\u001b[47m590 \u001b[m\u001b[93m\u001b[107m c.state.containers.Delete(ctr.ID())\r\n\u001b[96m\u001b[47m591 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m592 \r\n593 \u001b[m\u001b[93m\u001b[107m\u001b[96m// listContainers returns a list of all containers stored by the server state\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m594 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (c *ContainerServer) listContainers() []*oci.Container {\r\n\u001b[96m\u001b[47m595 \u001b[m\u001b[93m\u001b[107m c.stateLock.Lock()\r\n\u001b[96m\u001b[47m596 \u001b[m\u001b[93m\u001b[107m \u001b[32mdefer\u001b[m\u001b[93m\u001b[107m c.stateLock.Unlock()\r\n\u001b[96m\u001b[47m597 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m c.state.containers.List()\r\n\u001b[96m\u001b[47m598 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m599 \r\n600 \u001b[m\u001b[93m\u001b[107m\u001b[96m// ListContainers returns a list of all containers stored by the server state that match the given filter function\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b" - ], - [ - 2.9e-05, - "[47m601 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (c *ContainerServer) ListContainers(filters ...\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m(*oci.Container) \u001b[33mbool\u001b[m\u001b[93m\u001b[107m) ([]*oci.Container, \u001b[33merror\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m602 \u001b[m\u001b[93m\u001b[107m containers := c.listContainers()\r\n\u001b[96m\u001b[47m603 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(filters) == \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m604 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m containers, \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m605 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m606 \u001b[m\u001b[93m\u001b[107m filteredContainers := \u001b[32mmake\u001b[m\u001b[93m\u001b[107m([]*oci.Container, \u001b[36m0\u001b[m\u001b[93m\u001b[107m, \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(containers))\r\n\u001b[96m\u001b[47m607 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m _, container := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m containers {\r\n\u001b[96m\u001b[47m608 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mfor\u001b[m\u001b[93m\u001b[107m _, filter := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m filters {\r\n\u001b[96m\u001b[47m609 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter(container) \u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m610 \u001b[m" - ], - [ - 0.027894, - "\u001b[93m\u001b[107m\u001b[16CfilteredContainers = \u001b[32mappend\u001b[m\u001b[93m\u001b[107m(filteredContainers, container)\r\n\u001b[96m\u001b[47m611 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m612 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m613 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m614 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m filteredContainers, \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m615 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m616 \r\n617 \u001b[m\u001b[93m\u001b[107m\u001b[96m// AddSandbox adds a sandbox to 
the sandbox state store\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m618 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (c *ContainerServer) AddSandbox(sb *sandbox.Sandbox) {\r\n\u001b[96m\u001b[47m619 \u001b[m\u001b[93m\u001b[107m c.stateLock.Lock()\r\n\u001b[96m\u001b[47m620 \u001b[m\u001b[93m\u001b[107m \u001b[32mdefer\u001b[m\u001b[93m\u001b[107m c.stateLock.Unlock()\r\n\u001b[96m\u001b[47m621 \u001b[m\u001b[93m\u001b[107m c.state.sandboxes[sb.ID()] = sb\r\n\u001b[96m\u001b[47m622 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m623 \r\n624 \u001b[m\u001b[93m\u001b[107m\u001b[96m// GetSandbox returns a sandbox by its ID\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m625 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (c *ContainerServer) GetSandbox(id \u001b[33mstring\u001b[m\u001b[93m\u001b[107m) *sandb" - ], - [ - 2.9e-05, - "ox.Sandbox {\r\n\u001b[96m\u001b[47m626 \u001b[m\u001b[93m\u001b[107m c.stateLock.Lock()\r\n\u001b[96m\u001b[47m627 \u001b[m\u001b[93m\u001b[107m \u001b[32mdefer\u001b[m\u001b[93m\u001b[107m c.stateLock.Unlock()\r\n\u001b[96m\u001b[47m628 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m c.state.sandboxes[id]\r\n\u001b[96m\u001b[47m629 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m630 \r\n631 \u001b[m\u001b[93m\u001b[107m\u001b[96m// GetSandboxContainer returns a sandbox's infra container\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m632 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (c *ContainerServer) GetSandboxContainer(id \u001b[33mstring\u001b[m\u001b[93m\u001b[107m) *oci.Container {\r\n\u001b[96m\u001b[47m633 \u001b[m\u001b[93m\u001b[107m c.stateLock.Lock()\r\n\u001b[96m\u001b[47m634 \u001b[m\u001b[93m\u001b[107m \u001b[32mdefer\u001b[m\u001b[93m\u001b[107m c.stateLock.Unlock()\r\n\u001b[96m\u001b[47m635 \u001b[m\u001b[93m\u001b[107m sb, ok := c.state.sandboxes[id]\r\n\u001b[96m\u001b[47m636 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m !ok {\r\n\u001b[96m\u001b[47m637 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[53;12H kpod-stats \u001b[m\u001b[93m\u001b[107m" - ], - [ - 0.00922, - "\u001b[38;5;245m\u001b[48;5;240m\u001b[53;25H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mlibkpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mcontainer_server.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[53;55H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                        \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;144m\u001b[48;5;240m  89%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m 
\u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m611\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:13 \u001b[26;17H\u001b[?12l\u001b[?25h" - ], - [ - 2e-05, - "\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 3.500046, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[K\u001b[54;1H/\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.32149, - "L\u001b[?25l" - ], - [ - 0.014673, - "\u001b[34;16H\u001b[7m\u001b[91mL\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[53;2H\u001b[1m\u001b[38;5;22m\u001b[48;5;148mCOMMND \u001b[m\u001b[93m\u001b[107m\u001b[186C\u001b[38;5;247m\u001b[48;5;240m  90%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:12\r\n\u001b[m\u001b[93m\u001b[107m/L\u001b[?12l\u001b[?25h" - ], - [ - 0.16782, - "i\u001b[?25l" - ], - [ - 0.004265, - "\u001b[1;1H\u001b[96m\u001b[47m609\u001b[m\u001b[93m\u001b[107m\u001b[5C \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter(container) \u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m610\u001b[m\u001b[93m\u001b[107m\u001b[5C filteredContainers = \u001b[32mappend\u001b[m\u001b[93m\u001b[107m(filteredContainers, container)\r\n\u001b[96m\u001b[47m611\u001b[m\u001b[93m\u001b[107m\u001b[5C \u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\u001b[3;18H\u001b[K\u001b[4;1H\u001b[96m\u001b[47m612\u001b[m\u001b[93m\u001b[107m\u001b[5C }\u001b[4;14H\u001b[K\u001b[5;1H\u001b[96m\u001b[47m613\u001b[m\u001b[93m\u001b[107m\u001b[5C}\u001b[5;10H\u001b[K\u001b[6;1H\u001b[96m\u001b[47m614\u001b[m\u001b[93m\u001b[107m\u001b[1C \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m filteredContainers, \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m615\u001b[m\u001b[93m\u001b[107m\u001b[1C}\r\n\u001b[96m\u001b[47m616\u001b[m\u001b[93m\u001b[107m\u001b[8;5H\u001b[K\u001b[9;1H\u001b[96m\u001b[47m617\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[96m// AddSandbox adds a sandbox to the sandbox state store\u001b[m\u001b[93m\u001b[107m\u001b[9;60H\u001b[K\u001b[10;1H\u001b[96m\u001b[47m618\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (c *ContainerServer) AddSandbox(sb *sandbox.Sandbox) {\r\n\u001b[96m\u001b[47m619\u001b[m\u001b[93m\u001b[107m\u001b[5Cc.stateLock.Lock()\u001b[11;27H\u001b[K\u001b[12;1H\u001b[96m\u001b[47m620\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[32mdefe\u001b[m\u001b[93m\u001b[107m\u001b[1C c.stateLock.Unlock()\u001b[12;35H\u001b[K\u001b[13;1H\u001b[96m\u001b[47m621\u001b[m\u001b[93m\u001b" - ], - [ - 5.4e-05, - "[107m\u001b[1C c.state.sandboxes[sb.ID()] = sb\r\n\u001b[96m\u001b[47m622\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[15;2H\u001b[96m\u001b[47m23\u001b[m\u001b[93m\u001b[107m\u001b[15;5H\u001b[K\u001b[16;2H\u001b[96m\u001b[47m24\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[96m// GetSandbox returns a sandbox by its ID\u001b[m\u001b[93m\u001b[107m\u001b[16;46H\u001b[K\u001b[17;2H\u001b[96m\u001b[47m25\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (c *ContainerServer) GetSandbox(id \u001b[33mstring\u001b[m\u001b[93m\u001b[107m) *sandbox.Sandbox 
[asciinema recording: raw terminal escape sequences elided; the recoverable content is summarized below.]

Part 1: editing libkpod/container_server.go in vim (branch kpod-stats). The visible code is the ContainerServer state store, with every accessor taking c.stateLock: GetSandbox, GetSandboxContainer (which returns sb.InfraContainer(), or nil when the sandbox is unknown), HasSandbox, RemoveSandbox, ListSandboxes, AddSandbox, and the container-listing pair listContainers/ListContainers near line 600. A search for "ListContainers" wraps around ("search hit BOTTOM, continuing at TOP"), the doc comment above ListContainers is reflowed to read "// ListContainers returns a list of all containers stored by the server state / that match the given filter function", and the file is saved with :wq ("libkpod/container_server.go" 685L, 20535C written).
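For reference, the functions legible in the recording reconstruct as below. This is a best-effort transcript of the on-screen code, not an authoritative copy of the file: the package clause, the import paths, and the ContainerServer struct itself never appear on screen and are assumptions based on the file's location in the cri-o tree.

```
package libkpod

import (
	// Import paths assumed from the cri-o tree; only the identifiers
	// sandbox.Sandbox and oci.Container are actually visible on screen.
	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
	"github.com/kubernetes-incubator/cri-o/oci"
)

// GetSandbox returns a sandbox by its ID
func (c *ContainerServer) GetSandbox(id string) *sandbox.Sandbox {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	return c.state.sandboxes[id]
}

// HasSandbox checks if a sandbox exists in the state
func (c *ContainerServer) HasSandbox(id string) bool {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	_, ok := c.state.sandboxes[id]
	return ok
}

// RemoveSandbox removes a sandbox from the state store
func (c *ContainerServer) RemoveSandbox(id string) {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	delete(c.state.sandboxes, id)
}

// listContainers returns a list of all containers stored by the server state
func (c *ContainerServer) listContainers() []*oci.Container {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	return c.state.containers.List()
}

// ListContainers returns a list of all containers stored by the server state
// that match the given filter function
func (c *ContainerServer) ListContainers(filters ...func(*oci.Container) bool) ([]*oci.Container, error) {
	containers := c.listContainers()
	if len(filters) == 0 {
		return containers, nil
	}
	filteredContainers := make([]*oci.Container, 0, len(containers))
	for _, container := range containers {
		for _, filter := range filters {
			if filter(container) {
				filteredContainers = append(filteredContainers, container)
			}
		}
	}
	return filteredContainers, nil
}
```

As written, the inner loop appends a container once per matching filter, so a caller passing several overlapping filters would see duplicate entries; no deduplication step is visible in the recording.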
"\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000189, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 8.5e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000208, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000116, - "\u001b[?1h\u001b=" - ], - [ - 0.000101, - "\u001b[?2004h" - ], - [ - 11.704887, - "\u001b[?2004l\r\r\n" - ], - [ - 0.000651, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.025893, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats* \u001b[39m \u001b[33m29s\u001b[39m\r\n" - ], - [ - 0.001689, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 7.2e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.3e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 7.5e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000263, - "\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.604019, - ":" - ], - [ - 0.41525, - "\b \b" - ], - [ - 24.413263, - "v" - ], - [ - 0.123238, - "\bvi" - ], - [ - 0.096701, - " " - ], - [ - 0.103904, - "s" - ], - [ - 0.040009, - "e" - ], - [ - 0.056539, - "r" - ], - [ - 0.196367, - "ver\u001b[1m/\u001b[0m" - ], - [ - 0.158628, - "\b\u001b[0m/c" - ], - [ - 0.107515, - "o" - ], - [ - 0.072945, - "n" - ], - [ - 0.122494, - "\u0007" - ], - [ - 0.000289, - "\r\r\n" - ], - [ - 0.000116, - "\u001b[0mconfig.go container_create.go container_execsync.go container_portforward.go container_start.go container_stop.go \r\n\u001b[Jcontainer_attach.go \u001b[Jcontainer_exec.go \u001b[Jcontainer_list.go \u001b[Jcontainer_remove.go \u001b[Jcontainer_status.go \u001b[Jcontainer_updateruntimeconfig.go\u001b[J\u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\u001b[39m\r\u001b[2Cvi server/con\u001b[K\u001b[195C\u001b[90m\u001b[39m\u001b[39m\u001b[195D" - ], - [ - 0.477329, - "t" - ], - [ - 0.11174, - "a" - ], - [ - 0.211254, - "\r\r\n\u001b[J\u001b[A\u001b[17Ciner_" - ], - [ - 0.68805, - "l" - ], - [ - 0.124985, - "i" - ], - [ - 0.138637, - "st.go\u001b[1m \u001b[0m" - ], - [ - 0.548537, - "\b\u001b[0m \b" - ], - [ - 0.000176, - "\u001b[?1l\u001b>\u001b[?2004l\r\r\n\u001b[J" - ], - [ - 0.003295, - "\u001b]2;vim server/container_list.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.135184, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000569, - "\u001b[1;54r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[54;1H\"server/container_list.go\"" - ], - [ - 0.000171, - " 108L, 2729C" - ], - [ - 0.008118, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.001699, - "\u001b[1;1H\u001b[96m\u001b[47m 1 \u001b[m\u001b[93m\u001b[107m\u001b[32mpackage\u001b[m\u001b[93m\u001b[107m server\r\n\u001b[96m\u001b[47m 2 \r\n 3 \u001b[m\u001b[93m\u001b[107m\u001b[32mimport\u001b[m\u001b[93m\u001b[107m (\r\n\u001b[96m\u001b[47m 4 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"github.com/kubernetes-incubator/cri-o/oci\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 5 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"github.com/sirupsen/logrus\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 6 \u001b[m\u001b[93m\u001b[107m 
\u001b[36m\"golang.org/x/net/context\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 7 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"k8s.io/apimachinery/pkg/fields\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 8 \u001b[m\u001b[93m\u001b[107m pb \u001b[36m\"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 9 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 10 \r\n 11 \u001b[m\u001b[93m\u001b[107m\u001b[96m// filterContainer returns whether passed container matches filtering criteria\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 12 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m filterContainer(c *pb.Container, filter *pb.ContainerFilter) \u001b[33mbool\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 13 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 14 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[" - ], - [ - 1.6e-05, - "107m filter.State != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 15 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c.State != filter.State.State {\r\n\u001b[96m\u001b[47m 16 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 17 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 18 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.LabelSelector != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m\u001b[12Csel := fields.SelectorFromSet(filter.LabelSelector)\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m !sel.Matches(fields.Set(c.Labels)) {\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mtrue\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 28 \r\n 29 \u001b[m\u001b[93m\u001b[107m\u001b[96m// \u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m\u001b[96m lists all containers by filters." 
- ], - [ - 0.033458, - "\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (s *Server) \u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m(ctx context.Context, req *pb.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107mRequest) (*pb.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107mResponse, \u001b[33merror\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 31 \u001b[m\u001b[93m\u001b[107m logrus.Debugf(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m\u001b[36mRequest \u001b[m\u001b[93m\u001b[107m\u001b[31m%+v\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, req)\r\n\u001b[96m\u001b[47m 32 \u001b[m\u001b[93m\u001b[107m \u001b[32mvar\u001b[m\u001b[93m\u001b[107m ctrs []*pb.Container\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m filter := req.Filter\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m ctrList := s.ContainerServer.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m()\r\n\u001b[96m\u001b[47m 35 \r\n 36 \u001b[m\u001b[93m\u001b[107m \u001b[96m// Filter using container id and pod id first.\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 37 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.Id != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[12Cid, err := s.CtrI" - ], - [ - 3.2e-05, - "DIndex().Get(filter.Id)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 41 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m, err\r\n\u001b[96m\u001b[47m 42 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m\u001b[12Cc := s.ContainerServer.GetContainer(id)\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.PodSandboxId != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c.Sandbox() == filter.PodSandboxId {\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m\u001b[24CctrList = []*oci.Container{c}\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[20C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m\u001b[24CctrList = []*oci.Container{}\r\n\u001b[96m\u001b[47m 50 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 51 \r\n 52 \u001b[m\u001b[93m\u001b[107m\u001b[16C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m" - ], - [ - 0.009216, - "\u001b[53;12H kpod-stats \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[53;25H 
\u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mserver/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mcontainer_list.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[53;52H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                           \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   1%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m  1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1  \u001b[1;5H\u001b[?12l\u001b" - ], - [ - 2e-05, - "[?25h\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 0.527381, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   2%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[2;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.495163, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   3%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[3;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.025763, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   4%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:4\u001b[4;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.025962, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   5%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[5;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.035885, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   6%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[6;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.03159, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[7;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.027692, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   7%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[8;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.030541, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[3;12H\u001b[1m\u001b[31m\u001b[106m(\u001b[9;5H)\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   8%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[9;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.031712, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[3;12H(\u001b[9;5H)\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m   9%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m10\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[10;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.031153, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  10%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[11;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.026845, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  11%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[12;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.030542, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  12%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:4\u001b[13;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.032914, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  13%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[14;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.032993, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  14%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[15;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.026882, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  15%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[16;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.032257, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  16%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[17;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.031522, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  17%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[18;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.288102, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  18%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[19;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.496038, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  
19%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m20\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[20;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.024987, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[21;8H" - ], - [ - 0.034118, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  20%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[22;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.029418, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  21%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[23;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.031726, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  22%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[24;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.026646, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  23%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[25;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.038387, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  24%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[26;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.02789, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;76H\u001b[1m\u001b[31m\u001b[106m{\u001b[27;5H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;70m\u001b[48;5;240m  25%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[27;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.027891, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[12;76H{\u001b[27;5H}\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  26%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[28;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.025499, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  27%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[29;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.032868, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  28%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m30\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[30;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.034132, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  29%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:4\u001b[31;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.734937, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  
28%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[30;5H" - ], - [ - 0.244963, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  29%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:4\u001b[31;8H" - ], - [ - 1.147152, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  30%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[32;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.507676, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  31%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[33;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.021376, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[34;8H" - ], - [ - 0.52311, - "\u001b[53;209H5\u001b[34;9H" - ], - [ - 0.501301, - "\u001b[53;209H6\u001b[34;10H" - ], - [ - 0.026664, - "\u001b[53;209H7\u001b[34;11H" - ], - [ - 0.029294, - "\u001b[53;209H8\u001b[34;12H" - ], - [ - 0.029763, - "\u001b[53;209H9\u001b[34;13H" - ], - [ - 0.029935, - "\u001b[53;209H10\u001b[34;14H" - ], - [ - 0.030764, - "\u001b[53;210H1\u001b[34;15H" - ], - [ - 0.028666, - "\u001b[53;210H2\u001b[34;16H" - ], - [ - 0.031328, - "\u001b[53;210H3\u001b[34;17H" - ], - [ - 0.031664, - "\u001b[53;210H4\u001b[34;18H" - ], - [ - 0.027038, - "\u001b[53;210H5\u001b[34;19H" - ], - [ - 0.033849, - "\u001b[53;210H6\u001b[34;20H" - ], - [ - 0.028053, - "\u001b[53;210H7\u001b[34;21H" - ], - [ - 0.031407, - "\u001b[53;210H8\u001b[34;22H" - ], - [ - 0.029045, - "\u001b[53;210H9\u001b[34;23H" - ], - [ - 0.031094, - "\u001b[53;209H20\u001b[34;24H" - ], - [ - 0.030714, - "\u001b[53;210H1\u001b[34;25H" - ], - [ - 0.030843, - "\u001b[53;210H2\u001b[34;26H" - ], - [ - 0.029335, - "\u001b[53;210H3\u001b[34;27H" - ], - [ - 0.03625, - "\u001b[53;210H4\u001b[34;28H" - ], - [ - 0.02287, - "\u001b[53;210H5\u001b[34;29H" - ], - [ - 0.031991, - "\u001b[53;210H6\u001b[34;30H" - ], - [ - 0.026648, - "\u001b[53;210H7\u001b[34;31H" - ], - [ - 0.032914, - "\u001b[53;210H8\u001b[34;32H" - ], - [ - 0.030082, - "\u001b[53;210H9\u001b[34;33H" - ], - [ - 0.03302, - "\u001b[53;209H30\u001b[34;34H" - ], - [ - 0.029673, - "\u001b[53;210H1\u001b[34;35H" - ], - [ - 0.029969, - "\u001b[53;210H2\u001b[34;36H" - ], - [ - 0.030958, - "\u001b[53;210H3\u001b[34;37H" - ], - [ - 0.032073, - "\u001b[53;210H4\u001b[34;38H" - ], - [ - 0.029162, - "\u001b[53;210H5\u001b[34;39H" - ], - [ - 0.030591, - "\u001b[53;210H6\u001b[34;40H" - ], - [ - 0.02993, - "\u001b[53;210H7\u001b[34;41H" - ], - [ - 0.032535, - "\u001b[53;210H8\u001b[34;42H" - ], - [ - 0.029006, - "\u001b[53;210H9\u001b[34;43H" - ], - [ - 0.031122, - "\u001b[53;209H40\u001b[34;44H" - ], - [ - 0.027152, - "\u001b[53;210H1\u001b[34;45H" - ], - [ - 0.030614, - "\u001b[53;210H2\u001b[34;46H" - ], - [ - 0.030056, - "\u001b[53;210H3\u001b[34;47H" - ], - [ - 0.031423, - "\u001b[53;210H4\u001b[34;48H" - ], - [ - 0.03265, - "\u001b[53;210H5\u001b[34;49H" - ], - [ - 0.026915, - "\u001b[53;210H6\u001b[34;50H" - ], - [ - 0.032032, - "\u001b[53;210H7\u001b[34;51H" - ], - [ - 0.22945, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1C\u001b[1m\u001b[31m\u001b[106m()\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m8\u001b[34;52H\u001b[?12l\u001b[?25h" - ], - [ - 0.516968, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[34m-- INSERT --\u001b[m\u001b[93m\u001b[107m\u001b[54;13H\u001b[K" - ], - [ - 0.039223, - "\u001b[53;1H\u001b[1m\u001b[38;5;23m\u001b[48;5;231m INSERT \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[m\u001b[93m\u001b[107m\u001b[53;9H\u001b[38;5;231m\u001b[48;5;31m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m\u001b[53;12H kpod-stats \u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;31m\u001b[53;25H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31mserver/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;31mcontainer_list.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[m\u001b[93m\u001b[107m\u001b[53;51H\u001b[38;5;31m\u001b[48;5;24m\u001b[53;52H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;24m                                                                                                                           \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;117m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;149m\u001b[48;5;31m  31%" - ], - [ - 2.7e-05, - "\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;24m\u001b[48;5;117m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 34\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;117m:48 \u001b[34;52H\u001b[?12l\u001b[?25h" - ], - [ - 1.114697, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[31m\u001b[106mj(\u001b[m\u001b[93m\u001b[107m)\u001b[53;50H\u001b[1m\u001b[38;5;220m\u001b[48;5;31m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[53;54H \u001b[m\u001b[93m\u001b[107m\u001b[155C\u001b[38;5;22m\u001b[48;5;117m9\u001b[m\u001b[93m\u001b[107m\u001b[34;52Hj\u001b[1m\u001b[31m\u001b[106m()\b\b\u001b[?12l\u001b[?25h" - ], - [ - 0.616695, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b(\u001b[1m\u001b[31m\u001b[106m)\u001b[m\u001b[93m\u001b[107m\u001b[34;54H\u001b[K\u001b[34;52H\u001b[1m\u001b[31m\u001b[106m(\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[34;52H\u001b[?12l\u001b[?25h" - ], - [ - 0.440354, - "\u001b[?25l\u001b[53;210H9\u001b[34;53H\u001b[?12l\u001b[?25h" - ], - [ - 8.949745, - "\u001b[?25l\u001b[53;210H8\u001b[34;52H\u001b[?12l\u001b[?25h" - ], - [ - 0.496969, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m()\u001b[53;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[34;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.032523, - "\u001b[53;210H6\u001b[34;50H" - ], - [ - 0.025768, - "\u001b[53;210H5\u001b[34;49H" - ], - [ - 0.032765, - "\u001b[53;210H4\u001b[34;48H" - ], - [ - 0.031191, - "\u001b[53;210H3\u001b[34;47H" - ], - [ - 0.030756, - "\u001b[53;210H2\u001b[34;46H" - ], - [ - 0.029399, - "\u001b[53;210H1\u001b[34;45H" - ], - [ - 0.338294, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;149m\u001b[48;5;31m  32%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;24m\u001b[48;5;117m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:1 \u001b[35;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.473917, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;149m\u001b[48;5;31m  31%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;24m\u001b[48;5;117m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:41\u001b[34;45H" - ], - [ - 113.574346, - "\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[1;1H\u001b[96m\u001b[47m 2 \r\n 3 \u001b[m\u001b[93m\u001b[107m\u001b[32mimport\u001b[m\u001b[93m\u001b[107m (\r\n\u001b[96m\u001b[47m 4 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"github.com/kubernetes-incubator/cri-o/oci\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 5 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"github.com/sirupsen/logrus\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 6 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"golang.org/x/net/context\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 7 \u001b[m\u001b[93m\u001b[107m \u001b[36m\"k8s.io/apimachinery/pkg/fields\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 8 \u001b[m\u001b[93m\u001b[107m pb \u001b[36m\"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 9 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 10 \r\n 11 \u001b[m\u001b[93m\u001b[107m\u001b[96m// filterContainer returns whether passed container matches filtering criteria\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 12 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m filterContainer(c *pb.Container, filter *pb.ContainerFilter) \u001b[33mbool\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 13 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 14 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.State != \u001b[36m" - ], - [ - 2.7e-05, - "nil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 15 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c.State != filter.State.State {\r\n\u001b[96m\u001b[47m 16 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 17 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 18 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.LabelSelector != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m\u001b[12Csel := fields.SelectorFromSet(filter.LabelSelector)\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m !sel.Matches(fields.Set(c.Labels)) {\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m 
\u001b[36mtrue\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 28 \r\n 29 \u001b[m\u001b[93m\u001b[107m\u001b[96m// \u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m\u001b[96m lists all containers by filters.\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m" - ], - [ - 0.005724, - "\u001b[47m 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (s *Server) \u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m(ctx context.Context, req *pb.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107mRequest) (*pb.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107mResponse, \u001b[33merror\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 31 \u001b[m\u001b[93m\u001b[107m logrus.Debugf(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m\u001b[36mRequest \u001b[m\u001b[93m\u001b[107m\u001b[31m%+v\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, req)\r\n\u001b[96m\u001b[47m 32 \u001b[m\u001b[93m\u001b[107m \u001b[32mvar\u001b[m\u001b[93m\u001b[107m ctrs []*pb.Container\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m filter := req.Filter\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m ctrList := s.ContainerServer.\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m()\r\n\u001b[96m\u001b[47m 35 \r\n 36 \u001b[m\u001b[93m\u001b[107m \u001b[96m// Filter using container id and pod id first.\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 37 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.Id != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[12Cid, err := s.CtrIDIndex().Get(filter.I" - ], - [ - 3.6e-05, - "d)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 41 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m, err\r\n\u001b[96m\u001b[47m 42 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m\u001b[12Cc := s.ContainerServer.GetContainer(id)\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.PodSandboxId != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mif\u001b[m\u001b[93m\u001b[107m c.Sandbox() == filter.PodSandboxId {\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m\u001b[24CctrList = []*oci.Container{c}\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[20C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m\u001b[24CctrList = []*oci.Container{}\r\n\u001b[96m\u001b[47m 50 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 51 \u001b[m\u001b[93m\u001b[107m\r\n\u001b[1m\u001b[38;5;23m\u001b[48;5;231m INSERT 
\u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;31m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m\u001b[51;12H kpod-stats \u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;31m" - ], - [ - 1.9e-05, - "\u001b[51;25H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31mserver/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;31mcontainer_list.go\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;31m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[51;54H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;24m                                                                                                                         \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;117m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;149m\u001b[48;5;31m  31%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;24m\u001b[48;5;117m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 34\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:41 \u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[34m-- INSERT --\u001b[33;45H\u001b[?12l\u001b[?2" - ], - [ - 1.4e-05, - "5h" - ], - [ - 0.903658, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[33;38HListCon:tainers()\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m2\u001b[33;46H\u001b[?12l\u001b[?25h" - ], - [ - 0.265128, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107mqtainers()\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m3\u001b[33;47H\u001b[?12l\u001b[?25h" - ], - [ - 0.194087, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[33;47H\u001b[K\u001b[34;9Htainers()\u001b[35;9H\u001b[K\u001b[36;9H\u001b[96m// Filter using container id and pod id first.\u001b[m\u001b[93m\u001b[107m\u001b[37;9H\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[37;27H\u001b[K\u001b[38;13H\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.Id != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\u001b[38;33H\u001b[K\u001b[39;17Hid, err := s.CtrIDIndex().Get(filter.Id)\u001b[40;17H\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[40;33H\u001b[K\u001b[41;17H \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m, err\u001b[42;17H}\u001b[42;19H\u001b[K\u001b[43;17Hc := s.ContainerServer.GetContainer(id)\u001b[44;17H\u001b[32mif\u001b[m\u001b[93m\u001b[107m c != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[44;30H\u001b[K\u001b[45;21H\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.PodSandboxId != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\u001b[45;51H\u001b[K\u001b[46;25H\u001b[32mif\u001b[m\u001b[93m\u001b[107m c.Sandbox() == filter.PodSandboxId {\u001b[47;25H ctrList = []*oci.Container{c}\u001b[48;25H} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\u001b[48;33H\u001b[K\u001b[49;25H ctrList = []*oci.Container{}\u001b[50;25H}\u001b[51;195H\u001b[38;5;149m\u001b[48;5;31m  
32%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;24m\u001b[48;5;117m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:5 \u001b[34;9H\u001b[?12l" - ], - [ - 2.1e-05, - "\u001b[?25h" - ], - [ - 0.588871, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K" - ], - [ - 0.015698, - "\u001b[51;1H\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;31m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H kpod-stats \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;25H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mserver/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mcontainer_list.go\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[m\u001b[93m\u001b[107m\u001b[51;53H\u001b[38;5;240m\u001b[48;5;236m\u001b[51;54H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                         \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m" - ], - [ - 4.7e-05, - " go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  32%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 35\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;252m:4  \u001b[34;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.392274, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H1 line less; before #2 2 seconds ago" - ], - [ - 0.004898, - "\u001b[33;38H\u001b[7m\u001b[33mListContainers\u001b[m\u001b[93m\u001b[107m()\u001b[34;9H\u001b[K\u001b[35;9H\u001b[96m// Filter using container id and pod id first.\u001b[m\u001b[93m\u001b[107m\u001b[36;9H\u001b[32mif\u001b[m\u001b[93m\u001b[107m filter != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[36;27H\u001b[K\u001b[37;9H \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.Id != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\u001b[38;13H id, err := s.CtrIDIndex().Get(filter.Id)\u001b[39;17H\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[39;32H\u001b[K\u001b[40;17H \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m, err\u001b[41;17H}\u001b[41;21H\u001b[K\u001b[42;17Hc := s.ContainerServer.GetContainer(id)\u001b[43;17H\u001b[32mif\u001b[m\u001b[93m\u001b[107m c != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\u001b[43;30H\u001b[K\u001b[44;17H \u001b[32mif\u001b[m\u001b[93m\u001b[107m filter.PodSandboxId != \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m {\u001b[45;21H \u001b[32mif\u001b[m\u001b[93m\u001b[107m c.Sandbox() == filter.PodSandboxId {\u001b[46;25H ctrList = 
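The filterContainer helper is fully legible in the recording and reconstructs as below, together with the relevant entries from the import block shown at the top of the file (the full file also imports the oci, logrus, and golang.org/x/net/context packages, omitted here because this snippet does not use them). Again, this is a transcript of the on-screen code, not a verified copy.

```
package server

import (
	"k8s.io/apimachinery/pkg/fields"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// filterContainer returns whether passed container matches filtering criteria
func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool {
	if filter != nil {
		if filter.State != nil {
			if c.State != filter.State.State {
				return false
			}
		}
		if filter.LabelSelector != nil {
			// Reuse the fields selector machinery for label matching.
			sel := fields.SelectorFromSet(filter.LabelSelector)
			if !sel.Matches(fields.Set(c.Labels)) {
				return false
			}
		}
	}
	return true
}
```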
[]*oci.Container{c}\u001b[46;58H\u001b[K\u001b[47;25H} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\u001b[47;33H\u001b[K\u001b[48;25H ctrList = []*oci.Container{}\u001b[49;25H}\u001b[49;29H\u001b[K\u001b[50;25H\u001b[K\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  31%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m" - ], - [ - 3e-05, - "\u001b[48;5;252m:41\u001b[33;45H\u001b[?12l\u001b[?25h" - ], - [ - 0.603268, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1HType :qa! and press to abandon all changes and exit Vim\u001b[?5h\u001b[?12l\u001b[?25h" - ], - [ - 0.008041, - "\u001b[?5l\u001b[33;45H" - ], - [ - 0.319799, - "\u001b[?25l\u001b[52;1H\u001b[K\u001b[52;1H:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.127623, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.201264, - "\r\u001b[?25l\u001b[7m\u001b[31mE37: No write since last change (add ! to override)\u001b[?2004h" - ], - [ - 0.008793, - "\u001b[33;45H\u001b[?12l\u001b[?25h" - ], - [ - 0.638602, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K\u001b[52;1H:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.175644, - "q" - ], - [ - 7.5e-05, - "\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.256365, - "!\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.232366, - "\r" - ], - [ - 0.014901, - "\u001b[?25l\u001b[?2004l\u001b[52;1H\u001b[K\u001b[52;1H\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.00252, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.024247, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats* \u001b[39m \u001b[33m139s\u001b[39m\r\n" - ], - [ - 0.00148, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.00016, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 6.8e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000192, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000106, - "\u001b[?1h\u001b=" - ], - [ - 5e-05, - "\u001b[?2004h" - ], - [ - 0.340214, - "m" - ], - [ - 0.09551, - "\bma" - ], - [ - 0.120447, - "k" - ], - [ - 0.087296, - "e" - ], - [ - 0.071368, - " " - ], - [ - 0.089098, - "k" - ], - [ - 0.1039, - "p" - ], - [ - 0.071636, - "o" - ], - [ - 0.087901, - "d" - ], - [ - 0.136844, - "\u001b[?1l\u001b>" - ], - [ - 0.000113, - "\u001b[?2004l\r\r\n" - ], - [ - 0.012133, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 8.491971, - "go build -ldflags '-X main.gitCommit=1fd05c35 -X main.buildInfo=1502973722' -tags \"selinux seccomp \" -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod\r\n" - ], - [ - 6.307507, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.020575, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-stats* \u001b[39m \u001b[33m15s\u001b[39m\r\n" - ], - [ - 0.001077, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000118, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.6e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 9.3e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.5e-05, - "\u001b[?1h\u001b=" - ], - [ - 3e-05, - "\u001b[?2004h" - ], - [ - 
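For reference, a minimal, self-contained Go sketch of the filtering pattern visible in the recording; Container, Filter, and listContainers are simplified stand-ins for the CRI-O server types (the real code works on s.ContainerServer and *oci.Container), not the project's actual implementation:

```go
package main

import "fmt"

// Simplified stand-ins for the server-side types seen in the recording.
type Container struct {
	ID      string
	Sandbox string // pod sandbox the container belongs to
}

type Filter struct {
	Id           string // container ID to match; "" matches all
	PodSandboxId string // pod sandbox ID to match; "" matches all
}

// listContainers mirrors the on-screen logic: filter by container ID and
// pod ID first, shrinking the result to at most one container.
func listContainers(all []*Container, filter *Filter) []*Container {
	if filter == nil || filter.Id == "" {
		return all
	}
	for _, c := range all {
		if c.ID != filter.Id {
			continue
		}
		if filter.PodSandboxId == "" || c.Sandbox == filter.PodSandboxId {
			return []*Container{c} // ID matches, sandbox constraint satisfied
		}
		return []*Container{} // ID matches but lives in a different sandbox
	}
	return []*Container{}
}

func main() {
	all := []*Container{{ID: "abc", Sandbox: "pod1"}, {ID: "def", Sandbox: "pod2"}}
	fmt.Println(len(listContainers(all, &Filter{Id: "abc", PodSandboxId: "pod2"}))) // 0
	fmt.Println(len(listContainers(all, &Filter{Id: "abc"})))                       // 1
}
```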
❯ git status
On branch kpod-stats
Your branch is up-to-date with 'origin/kpod-stats'.
Changes not staged for commit:
	modified:   libkpod/container_server.go
	modified:   server/container_list.go

❯ make kpod
make: 'kpod' is up to date.

❯ sudo make clean
(removes _output/, docs/*.1 docs/*.5 docs/*.8, the crioctl/crio/kpod binaries, the test helper binaries, and runs `make clean` in conmon/ and pause/)

❯ git commit -a --amend
(vim opens .git/COMMIT_EDITMSG, 107L, with the message "add kpod stats function", signed off by Ryan Cole; the staged change modifies README.md, cmd/kpod/images.go, cmd/kpod/main.go, completions/bash/kpod, libkpod/container_server.go, libkpod/image/image.go, server/container_list.go and vendor.conf, adds cmd/kpod/stats.go, docs/kpod-stats.1.md, libkpod/stats.go and test/kpod_stats.bats, and vendors github.com/buger/goterm, github.com/mrunalp/fileutils, github.com/vishvananda/netlink and github.com/vishvananda/netns)
[kpod-stats 51fe5a83] add kpod stats function
 Date: Tue Jul 25 09:56:23 2017 -0400
 93 files changed, 15781 insertions(+), 1249 deletions(-)

❯ make kpod
go build -ldflags '-X main.gitCommit=51fe5a83 -X main.buildInfo=1502973930' -tags "selinux seccomp " -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod

❯ git push -f origin kpod-stats
Writing objects: 100% (120/120), 116.02 KiB | 4.30 MiB/s, done.
Total 120 (delta 31), reused 78 (delta 11)
To github.com:14rcole/cri-o
 + 1fd05c35...51fe5a83 kpod-stats -> kpod-stats (forced update)

❯ sudo docker ps
CONTAINER ID   IMAGE   COMMAND   CREATED   STATUS   PORTS   NAMES

❯ sudo docker run -d redis:alpine
7e7a6dcecb2a803420db5e51e50289160869d387d5fe002c1f968c9c5e0aff47

❯ sudo docker ps --no-trunc
CONTAINER ID                                                       IMAGE          COMMAND                               CREATED          STATUS         PORTS      NAMES
7e7a6dcecb2a803420db5e51e50289160869d387d5fe002c1f968c9c5e0aff47   redis:alpine   "docker-entrypoint.sh redis-server"   10 seconds ago   Up 8 seconds   6379/tcp   angry_sammet

❯ git checkout kpod-test-refactor
Switched to branch 'kpod-test-refactor'
"\bvi" - ], - [ - 0.111174, - " " - ], - [ - 0.080497, - "t" - ], - [ - 0.064222, - "e" - ], - [ - 0.174426, - "st\u001b[1m/\u001b[0m" - ], - [ - 0.314489, - "\b\u001b[0m/k" - ], - [ - 0.079352, - "pod_" - ], - [ - 0.695495, - "p" - ], - [ - 0.0322, - "u" - ], - [ - 0.141247, - "\u0007" - ], - [ - 0.000165, - "\r\r\n" - ], - [ - 5.7e-05, - "\u001b[J\u001b[0mkpod_pull.bats \u001b[Jkpod_push.bats\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\u001b[39m\r\u001b[2Cvi test/kpod_pu\u001b[K\u001b[193C\u001b[90m\u001b[39m\u001b[39m\u001b[193D" - ], - [ - 0.577866, - "s" - ], - [ - 0.192123, - "h.bats\u001b[1m \u001b[0m" - ], - [ - 2.609562, - "\b\u001b[0m \b\u001b[?1l\u001b>\u001b[?2004l\r\r\n\u001b[J" - ], - [ - 0.005239, - "\u001b]2;vim test/kpod_push.bats\u0007\u001b]1;vi\u0007" - ], - [ - 0.142495, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000713, - "\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"test/kpod_push.bats\"" - ], - [ - 9.6e-05, - " 87L, 2371C" - ], - [ - 0.003372, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.000718, - "\u001b[1;1H\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m run ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m rm -rf /tmp/busybox\r\n\u001b[96m\u001b[47m 41 \u001b[m\u001b[93m\u001b[107m stop_crio\r\n\u001b[96m\u001b[47m 42 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 43 \r\n 44 \u001b[m\u001b[93m\u001b[107m@test \u001b[36m\"kpod push to docker archive\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m run ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m echo \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m run ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m docker-archive:/tmp/busybox-archive:1.26\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m echo \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 50 \u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n\u001b[96m\u001b[47m 51 \u001b[m\u001b[93m\u001b[107m rm /tmp/busybox-archive\r\n\u001b[96m\u001b[47m 52 \u001b[m\u001b[93m\u001b[107m run ${KPOD_BINARY} $KPO" - ], - [ - 2.1e-05, - "D_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 53 \u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n\u001b[96m\u001b[47m 54 \u001b[m\u001b[93m\u001b[107m stop_crio\r\n\u001b[96m\u001b[47m 55 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 56 \r\n 57 \u001b[m\u001b[93m\u001b[107m@test \u001b[36m\"kpod push to oci without compression\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 58 \u001b[m\u001b[93m\u001b[107m run ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 59 \u001b[m\u001b[93m\u001b[107m echo \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 60 
\u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n\u001b[96m\u001b[47m 61 \u001b[m\u001b[93m\u001b[107m run mkdir /tmp/oci-busybox\r\n\u001b[96m\u001b[47m 62 \u001b[m\u001b[93m\u001b[107m echo \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 63 \u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m run ${KPOD_BINARY} $KPOD_OPTIONS push --disable-compression \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m oci:/tmp/oci-busybox\r\n\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m echo \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n" - ], - [ - 0.033108, - "\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m rm -rf /tmp/oci-busybox\r\n\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m run ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 69 \u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n\u001b[96m\u001b[47m 70 \u001b[m\u001b[93m\u001b[107m stop_crio\r\n\u001b[96m\u001b[47m 71 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 72 \r\n 73 \u001b[m\u001b[93m\u001b[107m@test \u001b[36m\"kpod push without signatures\"\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 74 \u001b[m\u001b[93m\u001b[107m run ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 75 \u001b[m\u001b[93m\u001b[107m echo \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 76 \u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n\u001b[96m\u001b[47m 77 \u001b[m\u001b[93m\u001b[107m run mkdir /tmp/busybox\r\n\u001b[96m\u001b[47m 78 \u001b[m\u001b[93m\u001b[107m echo \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 79 \u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n\u001b[96m\u001b[47m 80 \u001b[m\u001b[93m\u001b[107m run ${KPOD_BINARY} $KPOD_OPTIONS push --remove-signatures \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m dir:/tmp/busybox\r\n\u001b[96m\u001b[47m 81 \u001b[m\u001b[93m\u001b[107m echo \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[" - ], - [ - 3.3e-05, - "107m\r\n\u001b[96m\u001b[47m 82 \u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n\u001b[96m\u001b[47m 83 \u001b[m\u001b[93m\u001b[107m rm -rf /tmp/busybox\r\n\u001b[96m\u001b[47m 84 \u001b[m\u001b[93m\u001b[107m run ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 85 \u001b[m\u001b[93m\u001b[107m [ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\r\n\u001b[96m\u001b[47m 86 \u001b[m\u001b[93m\u001b[107m stop_crio\r\n\u001b[96m\u001b[47m 87 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H kpod-test-refactor \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;33H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mtest/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mkpod_push.bats 
\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;55H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                      \u001b[m\u001b[93m\u001b[107m" - ], - [ - 0.010668, - "\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;180H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;188H\u001b[38;5;247m\u001b[48;5;236m conf\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;247m\u001b[48;5;240m  97%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 84\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5  \u001b[47;9H\u001b[?12l\u001b[?25h\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 3.900157, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1;2H\u001b[96m\u001b[47m21\r\n 22\r\n 23\u001b[m\u001b[93m\u001b[107m\u001b[6Cun ${OCIC_BINARY} image remove busybox:test\u001b[4;2H\u001b[96m\u001b[47m24\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[1m\u001b[31m\u001b[106m[\u001b[m\u001b[93m\u001b[107m \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 \u001b[1m\u001b[31m\u001b[106m]\u001b[m\u001b[93m\u001b[107m\u001b[5;2H\u001b[96m\u001b[47m25\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[6;2H\u001b[96m\u001b[47m26\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[7;2H\u001b[96m\u001b[47m27\u001b[m\u001b[93m\u001b[107m\u001b[7;5H\u001b[K\u001b[8;2H\u001b[96m\u001b[47m28\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to directory\"\u001b[m\u001b[93m\u001b[107m {\u001b[8;37H\u001b[K\u001b[9;2H\u001b[96m\u001b[47m29\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[10;2H\u001b[96m\u001b[47m30\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[10;23H\u001b[K\u001b[11;2H\u001b[96m\u001b[47m31\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[11;28H\u001b[K\u001b[12;2H\u001b[96m\u001b[47m32\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/busybox\u001b[13;2H\u001b[96m\u001b[47m33\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[13;23H\u001b[K\u001b[14;2H\u001b[96m\u001b[47m34\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[14;28H\u001b[K\u001b[15;2H\u001b[96m\u001b[47m35\u001b[m\u001b[93m\u001b[107m\u001b[38Cpush \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b" - ], - [ - 6e-05, - "[107m dir:/tmp/busybox\u001b[16;2H\u001b[96m\u001b[47m36\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[16;23H\u001b[K\u001b[17;2H\u001b[96m\u001b[47m37\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[18;2H\u001b[96m\u001b[47m38\u001b[m\u001b[93m\u001b[107m\u001b[1C run ${KPOD_BINARY} 
$KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[19;2H\u001b[96m\u001b[47m39\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[20;2H\u001b[96m\u001b[47m40\u001b[m\u001b[93m\u001b[107m\u001b[1C rm -rf /tmp/busybox\u001b[20;28H\u001b[K\u001b[21;2H\u001b[96m\u001b[47m41\u001b[m\u001b[93m\u001b[107m\u001b[5Cstop_crio\u001b[21;18H\u001b[K\u001b[22;2H\u001b[96m\u001b[47m42\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[22;9H\u001b[K\u001b[23;2H\u001b[96m\u001b[47m43\u001b[m\u001b[93m\u001b[107m\u001b[23;9H\u001b[K\u001b[24;2H\u001b[96m\u001b[47m44\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to docker archive\"\u001b[m\u001b[93m\u001b[107m {\u001b[25;2H\u001b[96m\u001b[47m45\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[26;2H\u001b[96m\u001b[47m46\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[26;23H\u001b[K\u001b[27;2H\u001b[96m\u001b[47m47\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[27;28H\u001b[K\u001b[28;2H\u001b[96m\u001b[47m48\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push " - ], - [ - 0.016615, - "\u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m docker-archive:/tmp/busybox-archive:1.26\u001b[29;2H\u001b[96m\u001b[47m49\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[29;23H\u001b[K\u001b[30;2H\u001b[96m\u001b[47m50\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[30;28H\u001b[K\u001b[31;2H\u001b[96m\u001b[47m51\u001b[m\u001b[93m\u001b[107m\u001b[6Cm /tmp/busybox-archive\u001b[31;32H\u001b[K\u001b[32;2H\u001b[96m\u001b[47m52\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[33;2H\u001b[96m\u001b[47m53\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[34;2H\u001b[96m\u001b[47m54\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[35;2H\u001b[96m\u001b[47m55\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[36;2H\u001b[96m\u001b[47m56\u001b[m\u001b[93m\u001b[107m\u001b[36;5H\u001b[K\u001b[37;2H\u001b[96m\u001b[47m57\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to oci without compression\"\u001b[m\u001b[93m\u001b[107m {\u001b[37;51H\u001b[K\u001b[38;2H\u001b[96m\u001b[47m58\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[39;2H\u001b[96m\u001b[47m59\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[39;23H\u001b[K\u001b[40;2H\u001b[96m\u001b[47m60\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[40;28H\u001b[K\u001b[41;2H\u001b[96m\u001b[47m61\u001b[m\u001b[93m\u001b[1" - ], - [ - 5.1e-05, - "07m\u001b[5Crun mkdir /tmp/oci-busybox\u001b[42;2H\u001b[96m\u001b[47m62\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[42;23H\u001b[K\u001b[43;2H\u001b[96m\u001b[47m63\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[43;28H\u001b[K\u001b[44;2H\u001b[96m\u001b[47m64\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push 
--disable-compression \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m oci:/tmp/oci-busybox\u001b[45;2H\u001b[96m\u001b[47m65\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[45;23H\u001b[K\u001b[46;2H\u001b[96m\u001b[47m66\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[47;2H\u001b[96m\u001b[47m67\u001b[m\u001b[93m\u001b[107m\u001b[6Cm -rf /tmp/oci-busybox\u001b[47;32H\u001b[K\u001b[48;2H\u001b[96m\u001b[47m68\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[49;2H\u001b[96m\u001b[47m69\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[50;2H\u001b[96m\u001b[47m70\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  28%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m24\u001b[4;9H\u001b[?12l\u001b[?25h" - ], - [ - 1.049793, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[1;28H\u001b[K\u001b[2;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[3;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[3;28H\u001b[K\u001b[4;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${OCIC_BINARY} image remove busybox:test\u001b[5;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[6;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[7;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[8;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[8;5H\u001b[K\u001b[9;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to directory\"\u001b[m\u001b[93m\u001b[107m {\u001b[9;37H\u001b[K\u001b[10;2H\u001b[96m\u001b[47m29\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[11;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[11;23H\u001b[K\u001b[12;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[12;28H\u001b[K\u001b[13;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/busybox\u001b[14;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b" - ], - [ - 0.000196, - "[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[14;23H\u001b[K\u001b[15;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[15;28H\u001b[K\u001b[16;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m dir:/tmp/busybox\u001b[17;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[17;23H\u001b[K\u001b[18;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 
]\u001b[18;28H\u001b[K\u001b[19;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[20;2H\u001b[96m\u001b[47m39\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[21;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Crm -rf /tmp/busybox\u001b[22;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[23;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[24;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[24;5H\u001b[K\u001b[25;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to docker archive\"\u001b[m\u001b[93m\u001b[107m {\u001b[25;42H\u001b[K\u001b[26;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[27;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho " - ], - [ - 0.004649, - "\u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[27;23H\u001b[K\u001b[28;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[28;28H\u001b[K\u001b[29;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m docker-archive:/tmp/busybox-archive:1.26\u001b[30;2H\u001b[96m\u001b[47m49\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[30;23H\u001b[K\u001b[31;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[31;28H\u001b[K\u001b[32;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[6Cm /tmp/busybox-archive\u001b[32;32H\u001b[K\u001b[33;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[34;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[35;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[36;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[37;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[37;5H\u001b[K\u001b[38;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to oci without compression\"\u001b[m\u001b[93m\u001b[107m {\u001b[38;51H\u001b[K\u001b[39;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[40;2H\u001b" - ], - [ - 3e-05, - "[96m\u001b[47m59\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[40;23H\u001b[K\u001b[41;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[41;28H\u001b[K\u001b[42;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/oci-busybox\u001b[43;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[43;23H\u001b[K\u001b[44;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 
]\u001b[44;28H\u001b[K\u001b[45;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push --disable-compression \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m oci:/tmp/oci-busybox\u001b[46;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[46;23H\u001b[K\u001b[47;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[47;28H\u001b[K\u001b[48;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[6Cm -rf /tmp/oci-busybox\u001b[48;32H\u001b[K\u001b[49;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[50;2H\u001b[96m\u001b[47m69\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  26%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m" - ], - [ - 0.002097, - "\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.206758, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1;2H\u001b[96m\u001b[47m19\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[1;23H\u001b[K\u001b[2;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[2;28H\u001b[K\u001b[3;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[4;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[1m\u001b[31m\u001b[106m[\u001b[m\u001b[93m\u001b[107m \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 \u001b[1m\u001b[31m\u001b[106m]\u001b[m\u001b[93m\u001b[107m\u001b[4;28H\u001b[K\u001b[5;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${OCIC_BINARY} image remove busybox:test\u001b[6;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[7;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[8;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[9;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[9;5H\u001b[K\u001b[10;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to directory\"\u001b[m\u001b[93m\u001b[107m {\u001b[10;37H\u001b[K\u001b[11;2H\u001b[96m\u001b[47m29\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[12;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[12;23H\u001b[K\u001b[13;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$s" - ], - [ - 5.7e-05, - "tatus\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[13;28H\u001b[K\u001b[14;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/busybox\u001b[15;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[15;23H\u001b[K\u001b[16;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[16;28H\u001b[K\u001b[17;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun 
${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m dir:/tmp/busybox\u001b[18;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[18;23H\u001b[K\u001b[19;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[19;28H\u001b[K\u001b[20;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[21;2H\u001b[96m\u001b[47m39\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[22;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Crm -rf /tmp/busybox\u001b[23;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[24;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[25;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[25;5H\u001b[K\u001b[26;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to docker archive\"\u001b[m\u001b[93m\u001b[107m {\u001b[26;42H\u001b[K" - ], - [ - 0.006686, - "\u001b[27;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[28;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[28;23H\u001b[K\u001b[29;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[29;28H\u001b[K\u001b[30;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m docker-archive:/tmp/busybox-archive:1.26\u001b[31;2H\u001b[96m\u001b[47m49\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[31;23H\u001b[K\u001b[32;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[32;28H\u001b[K\u001b[33;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[6Cm /tmp/busybox-archive\u001b[33;32H\u001b[K\u001b[34;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[35;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[36;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[37;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[38;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5H\u001b[K\u001b[39;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to oci without compre" - ], - [ - 0.000129, - "ssion\"\u001b[m\u001b[93m\u001b[107m {\u001b[39;51H\u001b[K\u001b[40;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[41;2H\u001b[96m\u001b[47m59\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[41;23H\u001b[K\u001b[42;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[42;28H\u001b[K\u001b[43;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir 
/tmp/oci-busybox\u001b[44;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[44;23H\u001b[K\u001b[45;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[45;28H\u001b[K\u001b[46;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push --disable-compression \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m oci:/tmp/oci-busybox\u001b[47;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[47;23H\u001b[K\u001b[48;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[48;28H\u001b[K\u001b[49;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[6Cm -rf /tmp/oci-busybox\u001b[49;32H\u001b[K\u001b[50;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m" - ], - [ - 0.006076, - "\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  25%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.163845, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m containers-storage:[$ROOT]busybox:test\u001b[2;2H\u001b[96m\u001b[47m19\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[2;23H\u001b[K\u001b[3;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[3;28H\u001b[K\u001b[4;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[5;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[5;28H\u001b[K\u001b[6;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${OCIC_BINARY} image remove busybox:test\u001b[7;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[8;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[9;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[10;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[10;5H\u001b[K\u001b[11;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to directory\"\u001b[m\u001b[93m\u001b[107m {\u001b[11;37H\u001b[K\u001b[12;2H\u001b[96m\u001b[47m29\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[13;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[" - ], - [ - 8.3e-05, - "36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[13;23H\u001b[K\u001b[14;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[14;28H\u001b[K\u001b[15;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/busybox\u001b[16;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho 
\u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[16;23H\u001b[K\u001b[17;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[17;28H\u001b[K\u001b[18;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m dir:/tmp/busybox\u001b[19;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[19;23H\u001b[K\u001b[20;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[20;28H\u001b[K\u001b[21;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[22;2H\u001b[96m\u001b[47m39\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[23;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Crm -rf /tmp/busybox\u001b[24;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[25;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[26;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[26;5H\u001b[K\u001b[27;3H\u001b[96m\u001b[47m4" - ], - [ - 0.007793, - "\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to docker archive\"\u001b[m\u001b[93m\u001b[107m {\u001b[27;42H\u001b[K\u001b[28;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[29;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[29;23H\u001b[K\u001b[30;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[30;28H\u001b[K\u001b[31;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m docker-archive:/tmp/busybox-archive:1.26\u001b[32;2H\u001b[96m\u001b[47m49\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[32;23H\u001b[K\u001b[33;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[33;28H\u001b[K\u001b[34;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[6Cm /tmp/busybox-archive\u001b[34;32H\u001b[K\u001b[35;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[36;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[37;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[38;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[39;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[39" - ], - [ - 3.1e-05, - ";5H\u001b[K\u001b[40;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to oci without compression\"\u001b[m\u001b[93m\u001b[107m {\u001b[40;51H\u001b[K\u001b[41;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[42;2H\u001b[96m\u001b[47m59\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho 
\u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[42;23H\u001b[K\u001b[43;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[43;28H\u001b[K\u001b[44;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/oci-busybox\u001b[45;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[45;23H\u001b[K\u001b[46;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[46;28H\u001b[K\u001b[47;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push --disable-compression \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m oci:/tmp/oci-busybox\u001b[48;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[48;23H\u001b[K\u001b[49;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[49;28H\u001b[K\u001b[50;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[6Cm -rf /tmp/oci-busybox\u001b[50;32H\u001b[K\u001b[51;195H\u001b[38;5;70m" - ], - [ - 0.002123, - "\u001b[48;5;240m  24%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.19883, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[1;28H\u001b[K\u001b[2;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m containers-storage:[$ROOT]busybox:test\u001b[3;2H\u001b[96m\u001b[47m19\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[3;23H\u001b[K\u001b[4;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[1m\u001b[31m\u001b[106m[\u001b[m\u001b[93m\u001b[107m \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 \u001b[1m\u001b[31m\u001b[106m]\u001b[m\u001b[93m\u001b[107m\u001b[4;28H\u001b[K\u001b[5;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[6;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[6;28H\u001b[K\u001b[7;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${OCIC_BINARY} image remove busybox:test\u001b[8;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[9;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[10;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[11;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[11;5H\u001b[K\u001b[12;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to directory\"\u001b[m\u001b[93m\u001b[107m {\u001b[12;37H\u001b[K\u001b[1" - ], - [ - 0.000124, - "3;2H\u001b[96m\u001b[47m29\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[14;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho 
\u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[14;23H\u001b[K\u001b[15;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[15;28H\u001b[K\u001b[16;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/busybox\u001b[17;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[17;23H\u001b[K\u001b[18;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[18;28H\u001b[K\u001b[19;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m dir:/tmp/busybox\u001b[20;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[20;23H\u001b[K\u001b[21;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[21;28H\u001b[K\u001b[22;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[23;2H\u001b[96m\u001b[47m39\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[24;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Crm -rf /tmp/busybox\u001b[25;3H" - ], - [ - 0.001207, - "\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[26;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[27;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[27;5H\u001b[K\u001b[28;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to docker archive\"\u001b[m\u001b[93m\u001b[107m {\u001b[28;42H\u001b[K\u001b[29;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[30;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[30;23H\u001b[K\u001b[31;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[31;28H\u001b[K\u001b[32;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m docker-archive:/tmp/busybox-archive:1.26\u001b[33;2H\u001b[96m\u001b[47m49\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[33;23H\u001b[K\u001b[34;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[34;28H\u001b[K\u001b[35;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[6Cm /tmp/busybox-archive\u001b[35;32H\u001b[K\u001b[36;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[37;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[" - ], - [ - 0.000194, - "93m\u001b[107m -eq 0 ]\u001b[38;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C 
stop_crio\u001b[39;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[40;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[40;5H\u001b[K\u001b[41;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to oci without compression\"\u001b[m\u001b[93m\u001b[107m {\u001b[41;51H\u001b[K\u001b[42;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[43;2H\u001b[96m\u001b[47m59\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[43;23H\u001b[K\u001b[44;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[44;28H\u001b[K\u001b[45;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/oci-busybox\u001b[46;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[46;23H\u001b[K\u001b[47;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[47;28H\u001b[K\u001b[48;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push --disable-compression \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m oci:/tmp/oci-busybox\u001b[49;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[49;23H\u001b[K\u001b[50;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m" - ], - [ - 0.007907, - "\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[50;28H\u001b[K\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  23%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.157382, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[1;23H\u001b[K\u001b[2;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[2;28H\u001b[K\u001b[3;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m containers-storage:[$ROOT]busybox:test\u001b[4;2H\u001b[96m\u001b[47m19\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[4;23H\u001b[K\u001b[5;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[5;28H\u001b[K\u001b[6;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[7;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[7;28H\u001b[K\u001b[8;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${OCIC_BINARY} image remove busybox:test\u001b[9;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[10;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C 
stop_crio\u001b[11;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[12;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[12;5H\u001b[K\u001b[13;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to directory\"\u001b[m\u001b[93m\u001b" - ], - [ - 7.7e-05, - "[107m {\u001b[13;37H\u001b[K\u001b[14;2H\u001b[96m\u001b[47m29\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[15;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[15;23H\u001b[K\u001b[16;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[16;28H\u001b[K\u001b[17;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/busybox\u001b[18;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[18;23H\u001b[K\u001b[19;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[19;28H\u001b[K\u001b[20;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m dir:/tmp/busybox\u001b[21;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[21;23H\u001b[K\u001b[22;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[22;28H\u001b[K\u001b[23;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[24;2H\u001b[96m\u001b[47m39\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[25;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Crm -rf /tmp/busybox" - ], - [ - 0.004633, - "\u001b[26;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[27;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[28;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[28;5H\u001b[K\u001b[29;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to docker archive\"\u001b[m\u001b[93m\u001b[107m {\u001b[29;42H\u001b[K\u001b[30;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[31;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[31;23H\u001b[K\u001b[32;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[32;28H\u001b[K\u001b[33;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m docker-archive:/tmp/busybox-archive:1.26\u001b[34;2H\u001b[96m\u001b[47m49\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[34;23H\u001b[K\u001b[35;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[35;28H\u001b[K\u001b[36;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[6Cm 
/tmp/busybox-archive\u001b[36;32H\u001b[K\u001b[37;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[38;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$statu" - ], - [ - 2.3e-05, - "s\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[39;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[40;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[41;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[41;5H\u001b[K\u001b[42;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to oci without compression\"\u001b[m\u001b[93m\u001b[107m {\u001b[42;51H\u001b[K\u001b[43;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[44;2H\u001b[96m\u001b[47m59\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[44;23H\u001b[K\u001b[45;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[45;28H\u001b[K\u001b[46;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/oci-busybox\u001b[47;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[47;23H\u001b[K\u001b[48;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[48;28H\u001b[K\u001b[49;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push --disable-compression \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m oci:/tmp/oci-busybox\u001b[50;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[50;23H\u001b[K\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m" - ], - [ - 0.002017, - "  22%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m19\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.216601, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[2;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[2;23H\u001b[K\u001b[3;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[3;28H\u001b[K\u001b[4;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m containers-storage:[$ROOT]busybox:test\u001b[5;2H\u001b[96m\u001b[47m19\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[5;23H\u001b[K\u001b[6;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[6;28H\u001b[K\u001b[7;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[8;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 
]\u001b[8;28H\u001b[K\u001b[9;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${OCIC_BINARY} image remove busybox:test\u001b[10;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[11;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[12;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[13;3H\u001b[96m\u001b[47m7\u001b[m" - ], - [ - 0.000101, - "\u001b[93m\u001b[107m\u001b[13;5H\u001b[K\u001b[14;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to directory\"\u001b[m\u001b[93m\u001b[107m {\u001b[14;37H\u001b[K\u001b[15;2H\u001b[96m\u001b[47m29\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[16;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[16;23H\u001b[K\u001b[17;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[17;28H\u001b[K\u001b[18;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/busybox\u001b[19;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[19;23H\u001b[K\u001b[20;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[20;28H\u001b[K\u001b[21;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m dir:/tmp/busybox\u001b[22;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[22;23H\u001b[K\u001b[23;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[23;28H\u001b[K\u001b[24;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[25;2H\u001b[96m" - ], - [ - 0.00763, - "\u001b[47m39\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[26;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Crm -rf /tmp/busybox\u001b[27;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[28;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[29;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[29;5H\u001b[K\u001b[30;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to docker archive\"\u001b[m\u001b[93m\u001b[107m {\u001b[30;42H\u001b[K\u001b[31;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[32;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[32;23H\u001b[K\u001b[33;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[33;28H\u001b[K\u001b[34;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m 
docker-archive:/tmp/busybox-archive:1.26\u001b[35;2H\u001b[96m\u001b[47m49\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[35;23H\u001b[K\u001b[36;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[36;28H\u001b[K\u001b[37;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[6Cm /tmp/busybox-archive\u001b[37;32H\u001b[K\u001b[38;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m" - ], - [ - 0.000247, - "\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[39;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[40;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[41;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[42;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[42;5H\u001b[K\u001b[43;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to oci without compression\"\u001b[m\u001b[93m\u001b[107m {\u001b[43;51H\u001b[K\u001b[44;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[45;2H\u001b[96m\u001b[47m59\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[45;23H\u001b[K\u001b[46;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[46;28H\u001b[K\u001b[47;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/oci-busybox\u001b[48;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[48;23H\u001b[K\u001b[49;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[49;28H\u001b[K\u001b[50;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push --disable-compression \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m oci:/tmp/oci-busybox" - ], - [ - 0.003544, - "\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  21%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.174049, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to containers/storage\"\u001b[m\u001b[93m\u001b[107m {\u001b[1;47H\u001b[K\u001b[2;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[3;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[3;23H\u001b[K\u001b[4;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[1m\u001b[31m\u001b[106m[\u001b[m\u001b[93m\u001b[107m \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 \u001b[1m\u001b[31m\u001b[106m]\u001b[m\u001b[93m\u001b[107m\u001b[4;28H\u001b[K\u001b[5;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m 
containers-storage:[$ROOT]busybox:test\u001b[6;2H\u001b[96m\u001b[47m19\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[6;23H\u001b[K\u001b[7;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[7;28H\u001b[K\u001b[8;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[9;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[9;28H\u001b[K\u001b[10;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${OCIC_BINARY} image remove busybox:test\u001b[11;3H\u001b[96m\u001b[47m" - ], - [ - 5.9e-05, - "4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[12;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[13;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[14;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[14;5H\u001b[K\u001b[15;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to directory\"\u001b[m\u001b[93m\u001b[107m {\u001b[15;37H\u001b[K\u001b[16;2H\u001b[96m\u001b[47m29\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[17;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[17;23H\u001b[K\u001b[18;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[18;28H\u001b[K\u001b[19;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/busybox\u001b[20;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[20;23H\u001b[K\u001b[21;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[21;28H\u001b[K\u001b[22;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m dir:/tmp/busybox\u001b[23;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[23;23H\u001b[K\u001b[24;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ " - ], - [ - 0.001075, - "\u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[24;28H\u001b[K\u001b[25;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[26;2H\u001b[96m\u001b[47m39\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[27;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5Crm -rf /tmp/busybox\u001b[28;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[29;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[30;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[30;5H\u001b[K\u001b[31;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to docker archive\"\u001b[m\u001b[93m\u001b[107m {\u001b[31;42H\u001b[K\u001b[32;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull 
\u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[33;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[33;23H\u001b[K\u001b[34;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[34;28H\u001b[K\u001b[35;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS push \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m docker-archive:/tmp/busybox-archive:1.26\u001b[36;2H\u001b[96m\u001b[47m49\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[36;23H\u001b[K\u001b[37;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b" - ], - [ - 5.3e-05, - "[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[37;28H\u001b[K\u001b[38;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[6Cm /tmp/busybox-archive\u001b[38;32H\u001b[K\u001b[39;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS rmi \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[40;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[41;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C stop_crio\u001b[42;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[1C}\u001b[43;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[43;5H\u001b[K\u001b[44;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to oci without compression\"\u001b[m\u001b[93m\u001b[107m {\u001b[44;51H\u001b[K\u001b[45;3H\u001b[96m\u001b[47m8\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[46;2H\u001b[96m\u001b[47m59\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[46;23H\u001b[K\u001b[47;3H\u001b[96m\u001b[47m0\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 ]\u001b[47;28H\u001b[K\u001b[48;3H\u001b[96m\u001b[47m1\u001b[m\u001b[93m\u001b[107m\u001b[5Crun mkdir /tmp/oci-busybox\u001b[49;3H\u001b[96m\u001b[47m2\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[49;23H\u001b[K\u001b[50;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m" - ], - [ - 0.008247, - "\u001b[93m\u001b[107m -eq 0 ]\u001b[50;28H\u001b[K\u001b[51;195H\u001b[38;5;70m\u001b[48;5;240m  20%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;9H\u001b[?12l\u001b[?25h" - ], - [ - 2.125968, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[1;3H\u001b[96m\u001b[47m3\u001b[m\u001b[93m\u001b[107m\u001b[1;5H\u001b[K\u001b[2;3H\u001b[96m\u001b[47m4\u001b[m\u001b[93m\u001b[107m\u001b[1C@test \u001b[36m\"kpod push to containers/storage\"\u001b[m\u001b[93m\u001b[107m {\u001b[2;47H\u001b[K\u001b[3;3H\u001b[96m\u001b[47m5\u001b[m\u001b[93m\u001b[107m\u001b[5Crun ${KPOD_BINARY} $KPOD_OPTIONS pull \u001b[36m\"$IMAGE\"\u001b[m\u001b[93m\u001b[107m\u001b[4;3H\u001b[96m\u001b[47m6\u001b[m\u001b[93m\u001b[107m\u001b[5Cecho \u001b[36m\"$output\"\u001b[m\u001b[93m\u001b[107m\u001b[4;23H\u001b[K\u001b[5;3H\u001b[96m\u001b[47m7\u001b[m\u001b[93m\u001b[107m\u001b[5C[ \u001b[36m\"$status\"\u001b[m\u001b[93m\u001b[107m -eq 0 
[asciinema frames: vim showing test/kpod_push.bats on branch kpod-test-refactor. The visible buffer holds four @test blocks — "kpod push to containers/storage", "kpod push to directory", "kpod push to docker archive", and "kpod push to oci without compression" — each of which pulls "$IMAGE", pushes it to its target transport (containers-storage:[$ROOT]busybox:test, dir:/tmp/busybox, docker-archive:/tmp/busybox-archive:1.26, or a layout under /tmp/oci-busybox), checks "$status" -eq 0 after every step, removes the image, and finishes with stop_crio.]
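One test is fully legible across these frames; the following is a best-effort transcription of the rendered buffer (reconstructed from the recording, not copied from the file itself):

```bash
@test "kpod push to directory" {
	run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
	echo "$output"
	[ "$status" -eq 0 ]
	run mkdir /tmp/busybox
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" dir:/tmp/busybox
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
	[ "$status" -eq 0 ]
	rm -rf /tmp/busybox
	stop_crio
}
```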
[asciinema frames: vim enters INSERT mode on line 23 of test/kpod_push.bats; the cleanup line run ${OCIC_BINARY} image remove busybox:test is retyped (with a couple of corrected typos along the way) as run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE", after which the editor drops back to NORMAL mode.]
[asciinema frames: :wq writes "test/kpod_push.bats" 87L, 2372C. Back at the shell, git commit -a --amend opens COMMIT_EDITMSG ("Refactor kpod tests" / "Move kpod tests from kpod.bats to kpod_[commandname].bats" / "Signed-off-by: Ryan Cole"); the staged changes delete test/kpod.bats, modify test/kpod_diff.bats, test/kpod_load.bats, and test/kpod_save.bats, and add test/kpod_history.bats, test/kpod_images.bats, test/kpod_inspect.bats, test/kpod_pull.bats, test/kpod_push.bats, and test/kpod_version.bats. The amend lands as [kpod-test-refactor 72c6c49b] Refactor kpod tests — 10 files changed, 335 insertions(+), 253 deletions(-) — and git push -f origin kpod-test-refactor starts counting, compressing, and writing objects.]
[asciinema frames: the force-push completes — "To github.com:14rcole/cri-o  + 9327604d...72c6c49b kpod-test-refactor -> kpod-test-refactor (forced update)". git checkout kpod-rename switches branches, and vi test/kpod_rename.bats opens the rename test (35L, 907C): it sets IMAGE="redis:alpine", ROOT and RUNROOT under $TESTDIR, KPOD_OPTIONS, and NEW_NAME="rename-test", defines a teardown() that calls cleanup_test, and contains a single @test "kpod rename successful".]
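The whole 35-line buffer is legible in the frames; this is a best-effort transcription of test/kpod_rename.bats as first displayed, before the edit that follows:

```bash
#!/usr/bin/env bats

load helpers

IMAGE="redis:alpine"
ROOT="$TESTDIR/crio"
RUNROOT="$TESTDIR/crio-run"
KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS"
NEW_NAME="rename-test"

function teardown() {
	cleanup_test
}

@test "kpod rename successful" {
	start_crio
	run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE
	[ "$status" -eq 0 ]
	run crioctl pod run --config "$TESTDATA"/sandbox_config.json
	echo "$output"
	pod_id="$output"
	[ "$status" -eq 0 ]
	run ${OCIC_BINARY} ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
	ctr_id="$output"
	run ${KPOD_BINARY} $KPOD_OPTIONS rename "$ctr_id" "$NEW_NAME"
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} $KPOD_OPTIONS inspect "$ctr_id" --format {{.Name}}
	echo "$output"
	[ "$status" -eq 0 ]
	[ "$output" == "$NEW_NAME" ]
	cleanup_ctrs
	cleanup_pods
	stop_crio
}
```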
[asciinema frames: the cursor moves to line 23 and ${OCIC_BINARY} in the ctr create line is deleted and retyped as crioctl; :wq writes "test/kpod_rename.bats" 35L, 900C, and git commit -a --amend is typed at the prompt on branch kpod-rename.]
0.00586, - "\u001b]2;git commit -a --amend\u0007\u001b]1;git\u0007" - ], - [ - 0.024458, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.003473, - "\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"~/Development/Go/src/github.com/kubernetes-incubator/cri-o/.git/COMMIT_EDITMSG\"" - ], - [ - 0.000192, - " 23L, 623C" - ], - [ - 0.000113, - "\u001b[1;1Himplement kpod rename\r\n\r\nrename a container\r\n\r\nSigned-off-by: Ryan Cole \r\n\r\n# Please enter the commit message for your changes. Lines starting\r\n# with '#' will be ignored, and an empty message aborts the commit.\r\n#\r\n# Date: Mon Aug 14 13:30:24 2017 -0400\r\n#\r\n# On branch kpod-rename\r\n# Changes to be committed:\r\n#\u001b[7Cmodified: cmd/kpod/common.go\r\n#\u001b[7Cmodified: cmd/kpod/main.go\r\n#\u001b[7Cnew file: cmd/kpod/rename.go\r\n#\u001b[7Cmodified: completions/bash/kpod\r\n#\u001b[7Cnew file: docs/kpod-rename.1.md\r\n#\u001b[7Cmodified: docs/kpod.1.md\r\n#\u001b[7Cnew file: libkpod/rename.go\r\n#\u001b[7Cmodified: oci/container.go\r\n#\u001b[7Cnew file: test/kpod_rename.bats\r\n#\r\n\u001b[94m~ \u001b[25;1H~ " - ], - [ - 5.1e-05, - " \u001b[26;1H~ \u001b[27;1H~ \u001b[28;1H~ \u001b[29;1H~ \u001b[30;1H~ " - ], - [ - 3.6e-05, - " \u001b[31;1H~ \u001b[32;1H~ \u001b[33;1H~ \u001b[34;1H~ " - ], - [ - 3.3e-05, - " \u001b[35;1H~ \u001b[36;1H~ \u001b[37;1H~ \u001b[38;1H~ \u001b[39;1H~ " - ], - [ - 9.6e-05, - " \u001b[40;1H~ \u001b[41;1H~ \u001b[42;1H~ \u001b[43;1H~ \u001b[44;1H~ " - ], - [ - 2.8e-05, - " \u001b[45;1H~ \u001b[46;1H~ \u001b[47;1H~ \u001b[48;1H~ \u001b[49;1H~ " - ], - [ - 1.7e-05, - " \u001b[50;1H~ \u001b[51;1H~ \u001b[1;1H\u001b[?12l\u001b[?25h" - ], - [ - 0.324118, - "\u001b[?25l\u001b[m\u001b[52;1H\u001b[K\u001b[52;1H:\u001b[?2004h" - ], - [ - 0.000183, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.183, - "w" - ], - [ - 0.056247, - "q" - ], - [ - 0.073571, - "\r\u001b[?25l\u001b[?2004l\".git/COMMIT_EDITMSG\"" - ], - [ - 0.012337, - " 23L, 623C written" - ], - [ - 9.6e-05, - "\r\r\r\n\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.003601, - "[kpod-rename 1eb21f8e] implement kpod rename\r\n Date: Mon Aug 14 13:30:24 2017 -0400\r\n" - ], - [ - 0.000896, - " 9 files changed, 261 insertions(+), 1 deletion(-)\r\n create mode 100644 cmd/kpod/rename.go\r\n create mode 100644 docs/kpod-rename.1.md\r\n create mode 100644 libkpod/rename.go\r\n create mode 100644 test/kpod_rename.bats\r\n" - ], - [ - 0.000559, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.027462, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-rename \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001241, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000117, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.4e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 9.9e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.3e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.7e-05, - "\u001b[?2004h" - ], - [ - 0.12051, - "g" - ], - [ - 0.119913, - "\bgi" - ], - [ - 0.112417, - "t" - ], - [ - 0.143512, - " " - ], - [ - 0.095889, - "p" - ], - [ - 0.079922, - "u" - ], - [ - 0.088511, - "s" - ], - [ - 0.127486, - "h" - ], - [ - 0.087736, - " " - ], - [ - 0.151933, - "-" - ], - [ - 0.128526, - "f" - ], 
Counting objects: 19, done.
Delta compression using up to 4 threads.
Compressing objects: 100% (17/17), done.
Writing objects: 100% (19/19), 3.69 KiB | 3.69 MiB/s, done.
Total 19 (delta 12), reused 0 (delta 0)
remote: Resolving deltas: 100% (12/12), completed with 12 local objects.
To github.com:14rcole/cri-o
 + 2a874565...1eb21f8e kpod-rename -> kpod-rename (forced update)

❯ sudo crioctl ctr list
[sudo] password for ryan:
ID: d0ddf13569c69f6fed1934eb0e5e9d41b0cf37477c3d42c63931e9b9d2e8ab41
Pod: 75d9cdb9e450cd7b67e71136c35e6fa850edfd783b1893f61a69659e2eb8cb77
Name: podsandbox1-redis
Attempt: 0
Status: CONTAINER_RUNNING
Image: redis:alpine
Created: 2017-08-14 14:45:11.278672966 -0400 EDT
Labels:
	tier -> backend
Annotations:
	pod -> podsandbox1
❯ sudo kpod rmi redis:alpine
Could not remove image "redis:alpine" (must force) - one or more containers are using its reference image%!(EXTRA int=0)
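The trailing `%!(EXTRA int=0)` is not part of kpod's message: it is Go's fmt package reporting that `fmt.Errorf` received one more argument than the format string has verbs. The stray value being an `int` 0 matches the `for ctrID := range ctrIDs` loop that shows up in cmd/kpod/rmi.go later in this session, since ranging over a slice with a single variable yields the int index. A minimal standalone reproduction (hypothetical values, not the actual cri-o call):

```go
package main

import "fmt"

func main() {
	// One verb (%q) but two arguments: fmt has nowhere to put the 0,
	// so it appends "%!(EXTRA int=0)" to the formatted message.
	err := fmt.Errorf("Could not remove image %q (must force) - one or more containers are using its reference image", "redis:alpine", 0)
	fmt.Println(err)
}
```

`go vet` flags exactly this class of printf argument mismatch at build time.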
"-" - ], - [ - 0.151817, - "-" - ], - [ - 0.088524, - "r" - ], - [ - 0.32749, - "\b \b" - ], - [ - 0.14449, - "\b \b" - ], - [ - 0.151997, - "\b \b" - ], - [ - 0.679562, - "u" - ], - [ - 0.05667, - "p" - ], - [ - 0.147276, - "stream\u001b[1m \u001b[0m" - ], - [ - 0.324463, - "\b\u001b[0m m" - ], - [ - 0.088344, - "a" - ], - [ - 0.096029, - "s" - ], - [ - 0.187689, - "ter\u001b[1m:\u001b[0m" - ], - [ - 0.091664, - "\b\u001b[0m \b" - ], - [ - 0.000172, - "\u001b[?1l\u001b>" - ], - [ - 0.000159, - "\u001b[?2004l\r\r\n" - ], - [ - 0.004194, - "\u001b]2;git pull upstream master\u0007\u001b]1;git\u0007" - ], - [ - 1.697902, - "From github.com:kubernetes-incubator/cri-o\r\n * branch master -> FETCH_HEAD\r\n" - ], - [ - 0.057748, - "Updating a69631c1..6ca462a3\r\n" - ], - [ - 0.026098, - "Fast-forward" - ], - [ - 4.3e-05, - "\r\n" - ], - [ - 0.005314, - " README.md | 3 \u001b[32m+\u001b[m\u001b[31m-\u001b[m\r\n cmd/kpod/logs.go | 88 \u001b[32m+++++++++++++++++++++++\u001b[m\r\n cmd/kpod/main.go | 1 \u001b[32m+\u001b[m\r\n code-of-conduct.md | 20 \u001b[32m+++\u001b[m\u001b[31m---\u001b[m\r\n" - ], - [ - 0.000167, - " completions/bash/kpod | 25 \u001b[32m+++++++\u001b[m\r\n docs/kpod-logs.1.md | 61 \u001b[32m++++++++++++++++\u001b[m\r\n libkpod/image/copy.go | 18 \u001b[32m++++\u001b[m\u001b[31m-\u001b[m\r\n libkpod/logs.go | 80 \u001b[32m+++++++++++++++++++++\u001b[m\r\n test/kpod_logs.bats | 77 \u001b[32m++++++++++++++++++++\u001b[m\r\n vendor.conf | 3 \u001b[32m+\u001b[m\r\n vendor/github.com/hpcloud/tail/LICENSE.txt | 21 \u001b[32m++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/README.md | 28 \u001b[32m++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go | 97 \u001b[32m+++++++++++++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/ratelimiter/memory.go | 58 \u001b[32m+++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/ratelimiter/storage.go | 6 \u001b[32m++\u001b[m\r\n vendor/github.com/hpcloud/tail/tail.go | 438 " - ], - [ - 2.2e-05, - "\u001b[32m+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/tail_posix.go | 11 \u001b[32m+++\u001b[m\r\n vendor/github.com/hpcloud/tail/tail_windows.go | 12 \u001b[32m++++\u001b[m\r\n vendor/github.com/hpcloud/tail/util/util.go | 48 \u001b[32m+++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/watch/filechanges.go | 36 \u001b[32m++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/watch/inotify.go | 128 \u001b[32m+++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/watch/inotify_tracker.go | 260 \u001b[32m+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/watch/polling.go | 118 \u001b[32m+++++++++++++++++++++++++++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/watch/watch.go | 20 \u001b[32m++++++\u001b[m\r\n vendor/github.com/hpcloud/tail/winfile/winfile.go | 92 \u001b[32m++++++++++++++++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/LICENS" - ], - [ - 1.7e-05, - "E | 28 \u001b[32m++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/README.md | 50 \u001b[32m+++++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/fen.go | 37 \u001b[32m++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/fsnotify.go | 62 \u001b[32m++++++++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/inotify.go | 325 \u001b[32m++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n 
vendor/gopkg.in/fsnotify.v1/inotify_poller.go | 187 \u001b[32m+++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/kqueue.go | 503 \u001b[32m++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11 \u001b[32m+++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go | 12 \u001b[32m++++\u001b[m\r\n vendor/gopkg.in/fsnotify.v1/windows.go | 561 \u001b[" - ], - [ - 1.6e-05, - "32m+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n vendor/gopkg.in/tomb.v1/LICENSE | 29 \u001b[32m++++++++\u001b[m\r\n vendor/gopkg.in/tomb.v1/README.md | 4 \u001b[32m++\u001b[m\r\n vendor/gopkg.in/tomb.v1/tomb.go | 176 \u001b[32m++++++++++++++++++++++++++++++++++++++++++++++\u001b[m\r\n 38 files changed, 3722 insertions(+), 12 deletions(-)\r\n create mode 100644 cmd/kpod/logs.go\r\n create mode 100644 docs/kpod-logs.1.md\r\n create mode 100644 libkpod/logs.go\r\n create mode 100644 test/kpod_logs.bats\r\n create mode 100644 vendor/github.com/hpcloud/tail/LICENSE.txt\r\n create mode 100644 vendor/github.com/hpcloud/tail/README.md\r\n create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/memory.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/storage.go\r\n create mode 100644 ve" - ], - [ - 0.000117, - "ndor/github.com/hpcloud/tail/tail.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/tail_posix.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/tail_windows.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/util/util.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/watch/filechanges.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/watch/inotify.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/watch/inotify_tracker.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/watch/polling.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/watch/watch.go\r\n create mode 100644 vendor/github.com/hpcloud/tail/winfile/winfile.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/LICENSE\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/README.md\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/fen.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/fsnotify.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify_poller.go\r\n " - ], - [ - 3.1e-05, - "create mode 100644 vendor/gopkg.in/fsnotify.v1/kqueue.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go\r\n create mode 100644 vendor/gopkg.in/fsnotify.v1/windows.go\r\n create mode 100644 vendor/gopkg.in/tomb.v1/LICENSE\r\n create mode 100644 vendor/gopkg.in/tomb.v1/README.md\r\n create mode 100644 vendor/gopkg.in/tomb.v1/tomb.go\r\n" - ], - [ - 0.001059, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.038598, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001722, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]2;ryan@localhost: 
❯ vim cmd/kpod/rmi.go
  # "cmd/kpod/rmi.go" 123L, 3096C - the visible portion of the file:

 20     rmiCommand = cli.Command{
 21         Name:        "rmi",
 22         Usage:       "removes one or more images from local storage",
 23         Description: rmiDescription,
 24         Action:      rmiCmd,
 25         ArgsUsage:   "IMAGE-NAME-OR-ID [...]",
 26         Flags:       rmiFlags,
 27     }
 28 )
 29
 30 func rmiCmd(c *cli.Context) error {
 31
 32     force := false
 33     if c.IsSet("force") {
 34         force = c.Bool("force")
 35     }
 36
 37     args := c.Args()
 38     if len(args) == 0 {
 39         return errors.Errorf("image name or ID must be specified")
 40     }
 41
 42     config, err := getConfig(c)
 43     if err != nil {
 44         return errors.Wrapf(err, "Could not get config")
 45     }
 46     store, err := getStore(config)
 47     if err != nil {
 48         return err
 49     }
 50
 51     for _, id := range args {
 52         image, err := libkpodimage.FindImage(store, id)
 53         if err != nil {
 54             return errors.Wrapf(err, "could not get image %q", id)
 55         }
 56         if image != nil {
 57             ctrIDs, err := runningContainers(image, store)
 58             if err != nil {
 59                 return errors.Wrapf(err, "error getting running containers for image %q", id)
 60             }
 61             if len(ctrIDs) > 0 && len(image.Names) <= 1 {
 62                 if force {
 63                     removeContainers(ctrIDs, store)
 64                 } else {
 65                     for ctrID := range ctrIDs {
 66                         return fmt.Errorf("Could not remove image %q (must force) - container %q is using its reference image", id, ctrID)
 67                     }
 68                 }
 69             }

  # Line 66 is edited in place: "container %q is using its reference image"
  # becomes "one or more containers are using its reference image", and the
  # now-unused ", ctrID" argument is deleted from the fmt.Errorf call.
:wq
"cmd/kpod/rmi.go" 123L, 3100C written

❯ make kpod
go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502976873' -tags "selinux seccomp " -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod
# github.com/kubernetes-incubator/cri-o/cmd/kpod
cmd/kpod/rmi.go:65: ctrID declared and not used
make: *** [Makefile:83: kpod] Error 2
"\u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000157, - "\u001b[?1h\u001b=" - ], - [ - 3.1e-05, - "\u001b[?2004h" - ], - [ - 2.964102, - "v" - ], - [ - 0.136044, - "\bvu" - ], - [ - 0.095982, - " " - ], - [ - 0.088003, - "c" - ], - [ - 0.08868, - "n" - ], - [ - 0.154713, - "\u0007" - ], - [ - 0.10947, - "j" - ], - [ - 0.161602, - "\u0007" - ], - [ - 0.173494, - "\b \b" - ], - [ - 0.151719, - "\b \b" - ], - [ - 0.609085, - "m" - ], - [ - 0.167394, - "d\u001b[1m/\u001b[0m" - ], - [ - 0.840479, - "\b\u001b[0m/k" - ], - [ - 0.206026, - "pod\u001b[1m/\u001b[0m" - ], - [ - 0.257517, - "\b\u001b[0m/r" - ], - [ - 0.127635, - "m" - ], - [ - 0.103364, - "i.go\u001b[1m \u001b[0m" - ], - [ - 0.616305, - "\b\u001b[0m \b" - ], - [ - 0.000215, - "\u001b[?1l\u001b>" - ], - [ - 0.000236, - "\u001b[?2004l\r\r\n" - ], - [ - 0.004049, - "\u001b]2;vu cmd/kpod/rmi.go\u0007\u001b]1;vu\u0007" - ], - [ - 0.00938, - "zsh: vu: command not found..." - ], - [ - 6.4e-05, - "\r\n" - ], - [ - 0.171147, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.017238, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001466, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.00011, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.7e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 8e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.3e-05, - "\u001b[?1h\u001b=" - ], - [ - 3.2e-05, - "\u001b[?2004h" - ], - [ - 0.476196, - "6" - ], - [ - 0.080551, - "\b65" - ], - [ - 0.255454, - "G" - ], - [ - 0.432225, - "\b \b" - ], - [ - 0.159879, - "\b\b6 \b" - ], - [ - 0.1445, - "\b \b" - ], - [ - 0.384217, - "vu cmd/kpod/rmi.go" - ], - [ - 0.895489, - "\u001b[18D" - ], - [ - 0.176977, - "\u001b[1C" - ], - [ - 0.166725, - "\u001b[1C" - ], - [ - 0.256332, - "\b\bv cmd/kpod/rmi.go \u001b[17D" - ], - [ - 0.520213, - "\bvi cmd/kpod/rmi.go\u001b[16D" - ], - [ - 0.223669, - "\u001b[?1l\u001b>" - ], - [ - 0.000151, - "\u001b[?2004l\r\r\n" - ], - [ - 0.00446, - "\u001b]2;vim cmd/kpod/rmi.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.138232, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000485, - "\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"cmd/kpod/rmi.go\"" - ], - [ - 0.000106, - " 123L, 3100C" - ], - [ - 0.008045, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.002005, - "\u001b[1;1H\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m rmiCommand = cli.Command{\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[8CName:\u001b[8C\u001b[36m\"rmi\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[8CUsage:\u001b[7C\u001b[36m\"removes one or more images from local storage\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[8CDescription: rmiDescription,\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8CAction: rmiCmd,\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m\u001b[8CArgsUsage: \u001b[36m\"IMAGE-NAME-OR-ID [...]\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m\u001b[8CFlags:\u001b[7CrmiFlags,\r\n\u001b[96m\u001b[47m 27 
\u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 28 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 29 \r\n 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m rmiCmd(c *cli.Context) \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 31 \r\n 32 \u001b[m\u001b[93m\u001b[107m force := \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m c.IsSet(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m\u001b[8Cforce = c.Bool(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 35 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 36 \r\n 37 \u001b[m\u001b[93m\u001b[107m args := c.Args()\r\n\u001b[96m\u001b[47m 3" - ], - [ - 1.9e-05, - "8 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(args) == \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Errorf(\u001b[36m\"image name or ID must be specified\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 41 \r\n 42 \u001b[m\u001b[93m\u001b[107m config, err := getConfig(c)\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Could not get config\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m store, err := getStore(config)\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 50 \r\n 51 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m _, id := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m args {\r\n\u001b[96m\u001b[47m 52 \u001b[m\u001b[93m\u001b[107m\u001b[8Cimage, err := libkpodimage.FindImage(store, id)\r\n" - ], - [ - 0.031049, - "\u001b[96m\u001b[47m 53 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 54 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"could not get image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 55 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 56 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m image != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 57 \u001b[m\u001b[93m\u001b[107m\u001b[12CctrIDs, err := runningContainers(image, store)\r\n\u001b[96m\u001b[47m 58 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 59 
\u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"error getting running containers for image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 60 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 61 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(ctrIDs) > \u001b[36m0\u001b[m\u001b[93m\u001b[107m && \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(image.Names) <= \u001b[36m1\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 62 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m force {\r\n\u001b[96m\u001b[47m 63 \u001b[" - ], - [ - 0.000197, - "m\u001b[93m\u001b[107m\u001b[20CremoveContainers(ctrIDs, store)\r\n\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m\u001b[16C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mfor\u001b[m\u001b[93m\u001b[107m ctrID := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\r\n\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m\u001b[24C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m fmt.Errorf(\u001b[36m\"Could not remove image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m (must force) - one or more containers are using its reference image\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m\u001b[16C}\r\n\u001b[96m\u001b[47m 69 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;220m\u001b[48;5;240m M \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;41H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                        " - ], - [ - 0.009393, - "                                                                                                              \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  54%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 66\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:25 \u001b[47;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.000186, - 
"\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 0.894574, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;30H\u001b[1m\u001b[31m\u001b[106m{\u001b[45;21H}\u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  52%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[45;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.755059, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;30H{\u001b[45;21H} \u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:20\u001b[46;24H\u001b[?12l\u001b[?25h" - ], - [ - 0.297693, - "\u001b[?25l\u001b[51;210H1\u001b[46;25H\u001b[?12l\u001b[?25h" - ], - [ - 0.494594, - "\u001b[51;210H2\u001b[46;26H" - ], - [ - 0.029144, - "\u001b[51;210H3\u001b[46;27H" - ], - [ - 0.189856, - "\u001b[51;210H4\u001b[46;28H" - ], - [ - 0.503162, - "\u001b[51;210H5\u001b[46;29H" - ], - [ - 0.02688, - "\u001b[51;210H6\u001b[46;30H" - ], - [ - 0.031882, - "\u001b[51;210H7\u001b[46;31H" - ], - [ - 0.031015, - "\u001b[51;210H8\u001b[46;32H" - ], - [ - 0.028268, - "\u001b[51;210H9\u001b[46;33H" - ], - [ - 0.725696, - "\u001b[51;209H30\u001b[46;34H" - ], - [ - 5.869681, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[34m-- INSERT --\u001b[m\u001b[93m\u001b[107m\u001b[52;13H\u001b[K" - ], - [ - 0.040358, - "\u001b[51;1H\u001b[1m\u001b[38;5;23m\u001b[48;5;231m INSERT \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;231m\u001b[48;5;31m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;31m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;31mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;220m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;220m\u001b[48;5;31m M \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[51;41H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;24m                                                                                                                                      \u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;24munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;117m\u001b[48;5;24m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;74m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;117m\u001b[48;5;24m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m \u001b[m\u001b[93m\u001b[10" - ], - [ - 2.4e-05, - "7m\u001b[51;195H\u001b[38;5;186m\u001b[48;5;31m  53%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;117m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;24m\u001b[48;5;117m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;24m\u001b[48;5;117m 65\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;117m:30 \u001b[46;34H\u001b[?12l\u001b[?25h" - ], - [ - 0.274464, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b := 
\u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[46;51H\u001b[K\u001b[51;39H\u001b[1m\u001b[38;5;220m\u001b[48;5;31m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[51;43H \u001b[m\u001b[93m\u001b[107m\u001b[165C\u001b[38;5;22m\u001b[48;5;117m29\u001b[46;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.151925, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[46;50H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m8\u001b[46;32H\u001b[?12l\u001b[?25h" - ], - [ - 0.167731, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[46;49H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m7\u001b[46;31H\u001b[?12l\u001b[?25h" - ], - [ - 0.184041, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[46;48H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[46;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.156407, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[46;47H\u001b[K\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m5\u001b[46;29H\u001b[?12l\u001b[?25h" - ], - [ - 1.347077, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m_ := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[51;210H\u001b[38;5;22m\u001b[48;5;117m6\u001b[46;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.334595, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K" - ], - [ - 0.014443, - "\u001b[51;1H\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;31m\u001b[m\u001b[93m\u001b[107m\u001b[51;9H\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;220m\u001b[48;5;31m \u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;220m\u001b[48;5;240m M\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;31m\u001b[48;5;24m\u001b[m\u001b[93m\u001b[107m\u001b[51;42H\u001b[38;5;240m\u001b[48;5;236m\u001b[51;43H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                                                                                                                    \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b" - ], - [ - 2.9e-05, - "[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 65\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;117m:\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;252m:25 \u001b[46;29H\u001b[?12l\u001b[?25h" - ], - [ - 0.257293, - 
"\u001b[?25l\u001b[52;1H\u001b[m\u001b[93m\u001b[107m:\u001b[?2004h" - ], - [ - 8.6e-05, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.135176, - "w\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.03633, - "q" - ], - [ - 0.000156, - "\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.124203, - "\r" - ], - [ - 0.0003, - "\u001b[?25l\u001b[?2004l" - ], - [ - 0.023972, - "\"cmd/kpod/rmi.go\"" - ], - [ - 0.012083, - " 123L, 3096C written" - ], - [ - 0.013278, - "\r\r\r\n\u001b[39;49m\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.002006, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.025275, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m13s\u001b[39m\r\n" - ], - [ - 0.001052, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 8.3e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 6.3e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 7e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.4e-05, - "\u001b[?1h\u001b=" - ], - [ - 2e-05, - "\u001b[?2004h" - ], - [ - 0.122123, - "vi cmd/kpod/rmi.go" - ], - [ - 0.183865, - "\u001b[18Dvu\u001b[16C" - ], - [ - 0.8524, - "\u001b[18Dvi\u001b[16C" - ], - [ - 0.251769, - "\u001b[18D \u001b[18D" - ], - [ - 0.427799, - "vi cmd/kpod/rmi.go" - ], - [ - 0.996091, - "\u001b[?1l\u001b>" - ], - [ - 0.000117, - "\u001b[?2004l\r\r\n" - ], - [ - 0.004829, - "\u001b]2;vim cmd/kpod/rmi.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.136844, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000622, - "\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"cmd/kpod/rmi.go\"" - ], - [ - 7.1e-05, - " 123L, 3096C" - ], - [ - 0.008554, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.002929, - "\u001b[1;1H\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m rmiCommand = cli.Command{\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[8CName:\u001b[8C\u001b[36m\"rmi\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[8CUsage:\u001b[7C\u001b[36m\"removes one or more images from local storage\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[8CDescription: rmiDescription,\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8CAction: rmiCmd,\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m\u001b[8CArgsUsage: \u001b[36m\"IMAGE-NAME-OR-ID [...]\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m\u001b[8CFlags:\u001b[7CrmiFlags,\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 28 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 29 \r\n 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m rmiCmd(c *cli.Context) \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 31 \r\n 32 \u001b[m\u001b[93m\u001b[107m force := \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m c.IsSet(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 34 
\u001b[m\u001b[93m\u001b[107m\u001b[8Cforce = c.Bool(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 35 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 36 \r\n 37 \u001b[m\u001b[93m\u001b[107" - ], - [ - 1.9e-05, - "m args := c.Args()\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(args) == \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Errorf(\u001b[36m\"image name or ID must be specified\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 41 \r\n 42 \u001b[m\u001b[93m\u001b[107m config, err := getConfig(c)\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Could not get config\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m store, err := getStore(config)\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 50 \r\n 51 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m _, id := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m args {\r\n\u001b[96m\u001b[47m 52 \u001b[m\u001b[93m\u001b[107m\u001b[8Cimage, err := libkpodimage.FindI" - ], - [ - 0.030095, - "mage(store, id)\r\n\u001b[96m\u001b[47m 53 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 54 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"could not get image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 55 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 56 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m image != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 57 \u001b[m\u001b[93m\u001b[107m\u001b[12CctrIDs, err := runningContainers(image, store)\r\n\u001b[96m\u001b[47m 58 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 59 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"error getting running containers for image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 60 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 61 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(ctrIDs) > \u001b[36m0\u001b[m\u001b[93m\u001b[107m && \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(image.Names) <= 
\u001b[36m1\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 62 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m force {\r" - ], - [ - 3.7e-05, - "\n\u001b[96m\u001b[47m 63 \u001b[m\u001b[93m\u001b[107m\u001b[20CremoveContainers(ctrIDs, store)\r\n\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m\u001b[16C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mfor\u001b[m\u001b[93m\u001b[107m _ := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\r\n\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m\u001b[24C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m fmt.Errorf(\u001b[36m\"Could not remove image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m (must force) - one or more containers are using its reference image\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m\u001b[16C}\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;220m\u001b[48;5;240m M \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;41H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                    " - ], - [ - 0.011253, - "                                                                                                  \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 65\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:21 \u001b[47;25H\u001b[?12l\u001b[?25h\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 0.884291, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K\u001b[52;1H:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.175395, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.047934, - "\r" - ], - [ - 0.019314, - "\u001b[?25l\u001b[?2004l\u001b[52;1H\u001b[K\u001b[52;1H\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.001837, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.019156, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001205, - 
"\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000106, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.3e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 9.5e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.1e-05, - "\u001b[?2004h" - ], - [ - 0.22274, - "m" - ], - [ - 0.120257, - "\bma" - ], - [ - 0.087838, - "k" - ], - [ - 0.119717, - "e" - ], - [ - 0.055994, - " " - ], - [ - 0.103706, - "k" - ], - [ - 0.080698, - "p" - ], - [ - 0.051603, - "o" - ], - [ - 0.124175, - "d" - ], - [ - 0.076543, - "\u001b[?1l\u001b>\u001b[?2004l\r\r\n" - ], - [ - 0.004916, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 6.744424, - "go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502976912' -tags \"selinux seccomp \" -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod\r\n" - ], - [ - 1.99486, - "# github.com/kubernetes-incubator/cri-o/cmd/kpod\r\ncmd/kpod/rmi.go:65: no new variables on left side of :=\r\n" - ], - [ - 0.002634, - "make: *** [Makefile:83: kpod] Error 2\r\n" - ], - [ - 0.000711, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.019707, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m9s\u001b[39m\r\n" - ], - [ - 0.001352, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000124, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000112, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 4.6e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.8e-05, - "\u001b[?2004h" - ], - [ - 12.559376, - "make kpod" - ], - [ - 0.160079, - "\u001b[9Dvi cmd/kpod/rmi.go" - ], - [ - 0.376119, - "\\" - ], - [ - 0.719352, - "\u001b[?1l\u001b>" - ], - [ - 0.000252, - "\u001b[?2004l\r\r\n" - ], - [ - 0.000889, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J> \u001b[K" - ], - [ - 0.000257, - "\u001b[?1h\u001b=" - ], - [ - 9e-05, - "\u001b[?2004h" - ], - [ - 0.814396, - "\u001b[?2004l\r\r\n" - ], - [ - 0.002088, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.020752, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m24s\u001b[39m\r\n" - ], - [ - 0.001027, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000116, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.6e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000109, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.1e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.9e-05, - "\u001b[?2004h" - ], - [ - 0.191891, - "vi cmd/kpod/rmi.go\\" - ], - [ - 0.255869, - "\b \b" - ], - [ - 0.164227, - "\u001b[?1l\u001b>" - ], - [ - 0.000395, - "\u001b[?2004l\r\r\n" - ], - [ - 0.005085, - "\u001b]2;vim cmd/kpod/rmi.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.139286, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000664, - 
"\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"cmd/kpod/rmi.go\" 123L, 3096C" - ], - [ - 0.007849, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.001971, - "\u001b[1;1H\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m rmiCommand = cli.Command{\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m\u001b[8CName:\u001b[8C\u001b[36m\"rmi\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 22 \u001b[m\u001b[93m\u001b[107m\u001b[8CUsage:\u001b[7C\u001b[36m\"removes one or more images from local storage\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 23 \u001b[m\u001b[93m\u001b[107m\u001b[8CDescription: rmiDescription,\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[8CAction: rmiCmd,\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m\u001b[8CArgsUsage: \u001b[36m\"IMAGE-NAME-OR-ID [...]\"\u001b[m\u001b[93m\u001b[107m,\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m\u001b[8CFlags:\u001b[7CrmiFlags,\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 28 \u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 29 \r\n 30 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m rmiCmd(c *cli.Context) \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 31 \r\n 32 \u001b[m\u001b[93m\u001b[107m force := \u001b[36mfalse\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m c.IsSet(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m) {\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m\u001b[8Cforce = c.Bool(\u001b[36m\"force\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 35 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 36 \r\n 37 \u001b[m\u001b[93m\u001b[107" - ], - [ - 2e-05, - "m args := c.Args()\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(args) == \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 39 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Errorf(\u001b[36m\"image name or ID must be specified\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 40 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 41 \r\n 42 \u001b[m\u001b[93m\u001b[107m config, err := getConfig(c)\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Could not get config\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m store, err := getStore(config)\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 50 \r\n 51 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m _, id := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m args 
{\r\n\u001b[96m\u001b[47m 52 \u001b[m\u001b[93m\u001b[107m\u001b[8Cimage, err := libkpodimage.FindI" - ], - [ - 0.030785, - "mage(store, id)\r\n\u001b[96m\u001b[47m 53 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 54 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"could not get image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 55 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 56 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m image != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 57 \u001b[m\u001b[93m\u001b[107m\u001b[12CctrIDs, err := runningContainers(image, store)\r\n\u001b[96m\u001b[47m 58 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 59 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"error getting running containers for image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 60 \u001b[m\u001b[93m\u001b[107m\u001b[12C}\r\n\u001b[96m\u001b[47m 61 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(ctrIDs) > \u001b[36m0\u001b[m\u001b[93m\u001b[107m && \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(image.Names) <= \u001b[36m1\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 62 \u001b[m\u001b[93m\u001b[107m\u001b[16C\u001b[32mif\u001b[m\u001b[93m\u001b[107m force {\r" - ], - [ - 3e-05, - "\n\u001b[96m\u001b[47m 63 \u001b[m\u001b[93m\u001b[107m\u001b[20CremoveContainers(ctrIDs, store)\r\n\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m\u001b[16C} \u001b[32melse\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m\u001b[20C\u001b[32mfor\u001b[m\u001b[93m\u001b[107m _ := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\r\n\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m\u001b[24C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m fmt.Errorf(\u001b[36m\"Could not remove image \u001b[m\u001b[93m\u001b[107m\u001b[31m%q\u001b[m\u001b[93m\u001b[107m\u001b[36m (must force) - one or more containers are using its reference image\"\u001b[m\u001b[93m\u001b[107m, id)\r\n\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m\u001b[20C}\r\n\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m\u001b[16C}\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mrmi.go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;220m\u001b[48;5;240m M \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;41H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                    " - ], - [ - 0.009791, - "                                                                                                  
\u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 65\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:21 \u001b[47;25H\u001b[?12l\u001b[?25h" - ], - [ - 3.2e-05, - "\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" - ], - [ - 0.549454, - "\u001b[?25l\u001b[51;210H2\u001b[47;26H\u001b[?12l\u001b[?25h" - ], - [ - 0.282227, - "\u001b[51;210H3\u001b[47;27H" - ], - [ - 0.500981, - "\u001b[51;210H4\u001b[47;28H" - ], - [ - 0.028181, - "\u001b[51;210H5\u001b[47;29H" - ], - [ - 0.120266, - "\u001b[51;210H6\u001b[47;30H" - ], - [ - 0.176334, - "\u001b[51;210H7\u001b[47;31H" - ], - [ - 0.332685, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m= \u001b[32mrange\u001b[m\u001b[93m\u001b[107m ctrIDs {\u001b[47;47H\u001b[K\u001b[51;39H\u001b[1m\u001b[38;5;220m\u001b[48;5;240m + \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;43H \u001b[47;31H\u001b[?12l\u001b[?25h" - ], - [ - 0.457865, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\u001b[K\u001b[52;1H:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.16749, - "w\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.072064, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.096121, - "\r" - ], - [ - 0.000123, - "\u001b[?25l\u001b[?2004l" - ], - [ - 0.01842, - "\"cmd/kpod/rmi.go\"" - ], - [ - 0.012505, - " 123L, 3095C written" - ], - [ - 0.016238, - "\r\r\r\n\u001b[39;49m\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.002512, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.022671, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001565, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000124, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 3e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000106, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 7.2e-05, - "\u001b[?1h\u001b=" - ], - [ - 3.7e-05, - "\u001b[?2004h" - ], - [ - 0.334146, - "vi cmd/kpod/rmi.go" - ], - [ - 0.182947, - "\\" - ], - [ - 0.599976, - "\u001b[?1l\u001b>" - ], - [ - 5.2e-05, - "\u001b[?2004l\r\r\n" - ], - [ - 0.000542, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J> \u001b[K" - ], - [ - 0.000169, - "\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 1.143722, - "\u001b[?2004l\r\r\n" - ], - [ - 0.001295, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.028119, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m6s\u001b[39m\r\n" - ], - [ - 0.00136, - 
"\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000114, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 1.7e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000126, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.9e-05, - "\u001b[?1h\u001b=" - ], - [ - 3.2e-05, - "\u001b[?2004h" - ], - [ - 0.193141, - "vi cmd/kpod/rmi.go\\" - ], - [ - 0.159626, - "\b \b" - ], - [ - 0.18457, - "\\" - ], - [ - 0.29669, - "\u001b[19Dmake kpod \u001b[10D" - ], - [ - 0.863034, - "\u001b[?1l\u001b>" - ], - [ - 0.000232, - "\u001b[?2004l\r\r\n" - ], - [ - 0.004368, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 6.733531, - "go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502976943' -tags \"selinux seccomp \" -o kpod github.com/kubernetes-incubator/cri-o/cmd/kpod\r\n" - ], - [ - 4.55149, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.018009, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m12s\u001b[39m\r\n" - ], - [ - 0.001122, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000105, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007" - ], - [ - 8.3e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.1e-05, - "\u001b[?1h\u001b=" - ], - [ - 2e-05, - "\u001b[?2004h" - ], - [ - 20.811668, - "m" - ], - [ - 0.119573, - "\bma" - ], - [ - 0.111827, - "k" - ], - [ - 0.080357, - "e" - ], - [ - 0.103149, - " " - ], - [ - 0.084516, - "k" - ], - [ - 0.143968, - "p" - ], - [ - 0.079279, - "o" - ], - [ - 0.088902, - "d" - ], - [ - 0.135804, - "\u001b[?1l\u001b>" - ], - [ - 0.000119, - "\u001b[?2004l" - ], - [ - 0.000522, - "\r\r\n" - ], - [ - 0.003588, - "\u001b]2;make kpod\u0007\u001b]1;make\u0007" - ], - [ - 6.696294, - "make: 'kpod' is up to date.\r\n" - ], - [ - 0.000283, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.020139, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m7s\u001b[39m\r\n" - ], - [ - 0.001161, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 9e-05, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 2.8e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000108, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 6.5e-05, - "\u001b[?1h\u001b=" - ], - [ - 7.7e-05, - "\u001b[?2004h" - ], - [ - 243.314771, - "s" - ], - [ - 0.111394, - "\bsu" - ], - [ - 0.096183, - "d" - ], - [ - 0.080041, - "o" - ], - [ - 0.128398, - " " - ], - [ - 0.119735, - "m" - ], - [ - 0.095922, - "a" - ], - [ - 0.088141, - "k" - ], - [ - 0.072233, - "e" - ], - [ - 0.095742, - " " - ], - [ - 0.087941, - "i" - ], - [ - 0.104278, - "n" - ], - [ - 0.032039, - "s" - ], - [ - 0.118983, - "t" - ], - [ - 0.072644, - "a" - ], - [ - 0.143994, - "l" - ], - [ - 0.168065, - "l" - ], - [ - 0.752715, - "\u001b[?1l\u001b>\u001b[?2004l\r\r\n" - ], - [ - 0.009834, - "\u001b]2;sudo make install\u0007\u001b]1;make\u0007" - ], - [ - 0.955201, - "[sudo] 
password for ryan: " - ], - [ - 20.245864, - "\r\n" - ], - [ - 0.045577, - "mkdir -p \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/_output/src/github.com/kubernetes-incubator\"\r\n" - ], - [ - 0.001888, - "ln -s \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\" \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/_output/src/github.com/kubernetes-incubator\"\r\n" - ], - [ - 0.001297, - "touch \"/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o/_output/.gopathok\"\r\n" - ], - [ - 0.001197, - "install -D -m 755 crio /usr/local/bin/crio\r\n" - ], - [ - 0.000745, - "install: cannot stat 'crio': No such file or directory\r\n" - ], - [ - 0.000258, - "make: *** [Makefile:133: install] Error 1\r\n" - ], - [ - 0.002376, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.025148, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m21s\u001b[39m\r\n" - ], - [ - 0.001447, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000116, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 3.1e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000295, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[31m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 8.2e-05, - "\u001b[?1h\u001b=" - ], - [ - 2.9e-05, - "\u001b[?2004h" - ], - [ - 16.688284, - "s" - ], - [ - 0.136222, - "\bsu" - ], - [ - 0.119402, - "d" - ], - [ - 0.080011, - "o" - ], - [ - 0.135772, - " " - ], - [ - 0.088659, - "m" - ], - [ - 0.112281, - "a" - ], - [ - 0.118598, - "k" - ], - [ - 0.120387, - "e" - ], - [ - 0.104104, - " " - ], - [ - 0.248577, - "u" - ], - [ - 0.215792, - "n" - ], - [ - 0.140158, - "i" - ], - [ - 0.067519, - "n" - ], - [ - 0.071505, - "s" - ], - [ - 0.104231, - "t" - ], - [ - 0.08841, - "a" - ], - [ - 0.120753, - "l" - ], - [ - 0.111041, - "l" - ], - [ - 0.188244, - "\u001b[?1l\u001b>" - ], - [ - 0.000191, - "\u001b[?2004l\r\r\n" - ], - [ - 0.008904, - "\u001b]2;sudo make uninstall\u0007\u001b]1;make\u0007" - ], - [ - 2.992644, - "rm -f /usr/local/bin/crio\r\n" - ], - [ - 0.008835, - "rm -f /usr/local/bin/crioctl\r\n" - ], - [ - 0.006105, - "rm -f /usr/local/libexec/crio/conmon\r\n" - ], - [ - 0.001162, - "rm -f /usr/local/libexec/crio/pause" - ], - [ - 0.000104, - "\r\n" - ], - [ - 0.000807, - "for i in docs/kpod-diff.1 docs/kpod-push.1 docs/kpod.1 docs/kpod-export.1 docs/kpod-load.1 docs/kpod-images.1 docs/kpod-umount.1 docs/kpod-save.1 docs/kpod-cp.1 docs/kpod-info.1 docs/kpod-mount.1 docs/kpod-inspect.1 docs/kpod-logs.1 docs/kpod-history.1 docs/kpod-pull.1 docs/kpod-rmi.1 docs/kpod-version.1 docs/kpod-tag.1; do \\\r\n\trm -f /usr/local/share/man/man8/$(basename ${i}); \\\r\ndone\r\n" - ], - [ - 0.038238, - "for i in docs/crio.conf.5; do \\\r\n\trm -f /usr/local/share/man/man5/$(basename ${i}); \\\r\ndone\r\n" - ], - [ - 0.003095, - "for i in docs/crio.8; do \\\r\n\trm -f /usr/local/share/man/man8/$(basename ${i}); \\\r\ndone\r\n" - ], - [ - 0.006486, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.024997, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master* \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001214, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 8.9e-05, - "\u001b]2;ryan@localhost: 
[From here to the end of this hunk the diff removes an asciinema terminal
recording: a JSON stream of timestamped keystrokes and ANSI escape sequences.
It is condensed below to the commands and output it captures.]

❯ make all
go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502977274' -tags "selinux seccomp " -o crio github.com/kubernetes-incubator/cri-o/cmd/crio
go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502977274' -tags "selinux seccomp " -o crioctl github.com/kubernetes-incubator/cri-o/cmd/crioctl
make -C conmon
cc -std=c99 -Os -Wall -Wextra -I/usr/include/glib-2.0 -I/usr/lib64/glib-2.0/include -c -o conmon.o conmon.c
cc -std=c99 -Os -Wall -Wextra -I/usr/include/glib-2.0 -I/usr/lib64/glib-2.0/include -c -o cmsg.o cmsg.c
cc -o conmon conmon.o cmsg.o -std=c99 -Os -Wall -Wextra -I/usr/include/glib-2.0 -I/usr/lib64/glib-2.0/include -lglib-2.0
make -C pause
cc -std=c99 -Os -Wall -Wextra -static -c -o pause.o pause.c
cc -o pause pause.o -std=c99 -Os -Wall -Wextra -static
strip pause
go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502977274' -tags "selinux seccomp " -o test/bin2img/bin2img github.com/kubernetes-incubator/cri-o/test/bin2img
go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502977274' -tags "selinux seccomp " -o test/copyimg/copyimg github.com/kubernetes-incubator/cri-o/test/copyimg
go build -ldflags '-X main.gitCommit=6ca462a3 -X main.buildInfo=1502977274' -tags "selinux seccomp " -o test/checkseccomp/checkseccomp github.com/kubernetes-incubator/cri-o/test/checkseccomp
./crio --config="" config --default > crio.conf
(go-md2man -in docs/kpod-diff.1.md -out docs/kpod-diff.1.tmp && touch docs/kpod-diff.1.tmp && mv docs/kpod-diff.1.tmp docs/kpod-diff.1) || (/home/ryan/Development/Go/bin/go-md2man -in docs/kpod-diff.1.md -out docs/kpod-diff.1.tmp && touch docs/kpod-diff.1.tmp && mv docs/kpod-diff.1.tmp docs/kpod-diff.1)
[the same rule runs for kpod-push(1), crio.conf(5), kpod(1), kpod-export(1),
kpod-load(1), kpod-images(1), kpod-umount(1), crio(8), kpod-save(1),
kpod-cp(1), kpod-info(1), kpod-mount(1), kpod-inspect(1), kpod-logs(1),
kpod-history(1), kpod-pull(1), kpod-rmi(1), kpod-version(1), and kpod-tag(1);
the build completes in ~22s]
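Each of those man-page rules has the same either/or shape: run go-md2man from
$PATH, and only if that fails, retry with the copy under the user's Go bin
directory. A minimal sketch of the idiom, using crio(8) as the example (the
`touch` presumably refreshes the timestamp for make's dependency tracking;
$GOPATH/bin stands in for the hard-coded /home/ryan/Development/Go/bin path
seen in the transcript):

```sh
# Sketch of the Makefile's fallback idiom: try the tool from $PATH,
# then retry with an explicit $GOPATH/bin path if that fails.
(go-md2man -in docs/crio.8.md -out docs/crio.8.tmp \
    && touch docs/crio.8.tmp && mv docs/crio.8.tmp docs/crio.8) \
 || ($GOPATH/bin/go-md2man -in docs/crio.8.md -out docs/crio.8.tmp \
    && touch docs/crio.8.tmp && mv docs/crio.8.tmp docs/crio.8)
```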
❯ sudo make install
install -D -m 755 crio /usr/local/bin/crio
install -D -m 755 crioctl /usr/local/bin/crioctl
install -D -m 755 kpod /usr/local/bin/kpod
install -D -m 755 conmon/conmon /usr/local/libexec/crio/conmon
install -D -m 755 pause/pause /usr/local/libexec/crio/pause
install -d -m 755 /usr/local/share/man/man1
install -d -m 755 /usr/local/share/man/man5
install -d -m 755 /usr/local/share/man/man8
install -m 644 docs/kpod-diff.1 [... all 18 kpod man pages ...] docs/kpod-tag.1 -t /usr/local/share/man/man1
install -m 644 docs/crio.conf.5 -t /usr/local/share/man/man5
install -m 644 docs/crio.8 -t /usr/local/share/man/man8

❯ git reset --hard HEAD
HEAD is now at 6ca462a3 Merge pull request #718 from 14rcole/kpod-logs

❯ git push origin master
Total 0 (delta 0), reused 0 (delta 0)
To github.com:14rcole/cri-o
   a69631c1..6ca462a3  master -> master

❯ git checkout kpod-test-refactor
Switched to branch 'kpod-test-refactor'
❯ vi test/kpod_push.bats
[vim session: test/kpod_push.bats, 87 lines. The file loads the bats helpers,
sets IMAGE="alpine:latest", ROOT="$TESTDIR/crio", RUNROOT="$TESTDIR/crio-run"
and KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS", registers a
teardown() that calls cleanup_test, and defines the @test cases "kpod push to
containers/storage", "kpod push to directory", and "kpod push to docker
archive". Each case pulls $IMAGE, pushes it to the target transport, asserts
[ "$status" -eq 0 ] after every step, and cleans up with rmi. The edit
deletes a duplicated rmi/status-check pair from the first test; :wq writes
the file back at 85 lines.]
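The test pattern on screen is bats' standard run/status/output triple. A
distilled sketch, reproduced from the file shown in the recording (KPOD_BINARY,
STORAGE_OPTS, cleanup_test, and stop_crio come from the test harness loaded
via `load helpers`):

```sh
#!/usr/bin/env bats
# `run` captures the command's exit code in $status and its output in
# $output, so every step can be asserted and echoed on failure.
load helpers

IMAGE="alpine:latest"
ROOT="$TESTDIR/crio"
RUNROOT="$TESTDIR/crio-run"
KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS"

function teardown() {
    cleanup_test
}

@test "kpod push to directory" {
    run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
    echo "$output"
    [ "$status" -eq 0 ]
    run mkdir /tmp/busybox
    echo "$output"
    [ "$status" -eq 0 ]
    run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" dir:/tmp/busybox
    echo "$output"
    [ "$status" -eq 0 ]
    run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
    [ "$status" -eq 0 ]
    rm -rf /tmp/busybox
    stop_crio
}
```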
"\u001b[1;52r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[52;1H\"~/Development/Go/src/github.com/kubernetes-incubator/cri-o/.git/COMMIT_EDITMSG\"" - ], - [ - 0.000205, - " 24L, 716C" - ], - [ - 0.000426, - "\u001b[1;1HRefactor kpod tests\r\n\r\nMove kpod tests from kpod.bats to kpod_[commandname].bats\r\n\r\nSigned-off-by: Ryan Cole \r\n\r\n# Please enter the commit message for your changes. Lines starting\r\n# with '#' will be ignored, and an empty message aborts the commit.\r\n#\r\n# Date: Mon Aug 14 09:15:22 2017 -0400\r\n#\r\n# On branch kpod-test-refactor\r\n# Changes to be committed:\r\n#\u001b[7Cdeleted: test/kpod.bats\r\n#\u001b[7Cmodified: test/kpod_diff.bats\r\n#\u001b[7Cnew file: test/kpod_history.bats\r\n#\u001b[7Cnew file: test/kpod_images.bats\r\n#\u001b[7Cnew file: test/kpod_inspect.bats\r\n#\u001b[7Cmodified: test/kpod_load.bats\r\n#\u001b[7Cnew file: test/kpod_pull.bats\r\n#\u001b[7Cnew file: test/kpod_push.bats\r\n#\u001b[7Cmodified: test/kpod_save.bats\r\n#\u001b[7Cnew file: test/kpod_version.bats\r\n#\r\n\u001b[94m~ \u001b[26;1H~ " - ], - [ - 3e-05, - " \u001b[27;1H~ \u001b[28;1H~ \u001b[29;1H~ \u001b[30;1H~ " - ], - [ - 0.000148, - " \u001b[31;1H~ \u001b[32;1H~ \u001b[33;1H~ \u001b[34;1H~ \u001b[35;1H~ " - ], - [ - 2.2e-05, - " \u001b[36;1H~ \u001b[37;1H~ \u001b[38;1H~ \u001b[39;1H~ \u001b[40;1H~ " - ], - [ - 0.000582, - " \u001b[41;1H~ \u001b[42;1H~ \u001b[43;1H~ \u001b[44;1H~ " - ], - [ - 3e-05, - " \u001b[45;1H~ \u001b[46;1H~ \u001b[47;1H~ \u001b[48;1H~ \u001b[49;1H~ " - ], - [ - 0.000162, - " \u001b[50;1H~ \u001b[51;1H~ \u001b[1;1H\u001b[?12l\u001b[?25h" - ], - [ - 0.276849, - "\u001b[?25l\u001b[m\u001b[52;1H\u001b[K\u001b[52;1H:" - ], - [ - 5e-05, - "\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.161861, - "w" - ], - [ - 0.062363, - "q" - ], - [ - 0.113128, - "\r" - ], - [ - 7e-05, - "\u001b[?25l\u001b[?2004l\".git/COMMIT_EDITMSG\"" - ], - [ - 0.016122, - " 24L, 716C written\r\r\r\n\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.001866, - "[kpod-test-refactor 9f5954a6] Refactor kpod tests\r\n Date: Mon Aug 14 09:15:22 2017 -0400\r\n 10 files changed, 333 insertions(+), 253 deletions(-)\r\n delete mode 100644 test/kpod.bats\r\n create mode 100644 test/kpod_history.bats\r\n create mode 100644 test/kpod_images.bats\r\n create mode 100644 test/kpod_inspect.bats\r\n create mode 100644 test/kpod_pull.bats\r\n create mode 100644 test/kpod_push.bats\r\n create mode 100644 test/kpod_version.bats\r\n\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.046977, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-test-refactor \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001287, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000166, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000157, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m " - ], - [ - 2.7e-05, - "\u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000266, - "\u001b[?1h\u001b=" - ], - [ - 2.4e-05, - "\u001b[?2004h" - ], - [ - 0.08068, - "g" - ], - [ - 0.127014, - "\bgi" - ], - [ - 0.077769, - "t" - ], - [ - 0.07119, - " " - ], - [ - 0.125899, - "p" - ], - [ - 0.0933, - "u" - ], - [ - 0.107051, - "s" - ], - [ - 0.103696, - "h" - ], - [ - 0.079917, - " " - ], - [ - 0.173889, - "-" - ], - [ - 0.115307, - 
"f" - ], - [ - 0.080793, - " " - ], - [ - 0.077616, - "o" - ], - [ - 0.131483, - "r" - ], - [ - 0.1108, - "i" - ], - [ - 0.164739, - "g" - ], - [ - 0.119456, - "i" - ], - [ - 0.102232, - "n" - ], - [ - 0.064044, - " " - ], - [ - 0.163385, - "k" - ], - [ - 0.063896, - "pod-" - ], - [ - 0.226923, - "t" - ], - [ - 0.070106, - "e" - ], - [ - 0.236851, - "st-refactor\u001b[1m \u001b[0m" - ], - [ - 0.608419, - "\b\u001b[0m \b" - ], - [ - 5.6e-05, - "\u001b[?1l\u001b>" - ], - [ - 4.9e-05, - "\u001b[?2004l\r\r\n" - ], - [ - 0.002783, - "\u001b]2;git push -f origin kpod-test-refactor\u0007\u001b]1;git\u0007" - ], - [ - 0.779856, - "Counting objects: 12, done.\r\n" - ], - [ - 0.000105, - "Delta compression using up to 4 threads.\r\n" - ], - [ - 9.5e-05, - "Compressing objects: 8% (1/12) \rCompressing objects: 16% (2/12) \r" - ], - [ - 0.000129, - "Compressing objects: 25% (3/12) \r" - ], - [ - 0.00012, - "Compressing objects: 33% (4/12) \r" - ], - [ - 1.9e-05, - "Compressing objects: 41% (5/12) \r" - ], - [ - 4.6e-05, - "Compressing objects: 50% (6/12) \r" - ], - [ - 0.000177, - "Compressing objects: 58% (7/12) \rCompressing objects: 66% (8/12) \r" - ], - [ - 0.000199, - "Compressing objects: 75% (9/12) \r" - ], - [ - 3.8e-05, - "Compressing objects: 83% (10/12) \r" - ], - [ - 5.6e-05, - "Compressing objects: 91% (11/12) \r" - ], - [ - 5e-05, - "Compressing objects: 100% (12/12) \r" - ], - [ - 3.7e-05, - "Compressing objects: 100% (12/12), done.\r\n" - ], - [ - 0.000272, - "Writing objects: 8% (1/12) \rWriting objects: 16% (2/12) \rWriting objects: 25% (3/12) \r" - ], - [ - 2.8e-05, - "Writing objects: 33% (4/12) \r" - ], - [ - 0.0001, - "Writing objects: 41% (5/12) \r" - ], - [ - 0.00016, - "Writing objects: 58% (7/12) \r" - ], - [ - 4e-05, - "Writing objects: 66% (8/12) \r" - ], - [ - 6.7e-05, - "Writing objects: 75% (9/12) \r" - ], - [ - 0.000162, - "Writing objects: 83% (10/12) \r" - ], - [ - 3.9e-05, - "Writing objects: 91% (11/12) \r" - ], - [ - 4.4e-05, - "Writing objects: 100% (12/12) \r" - ], - [ - 5.8e-05, - "Writing objects: 100% (12/12), 2.56 KiB | 2.56 MiB/s, done.\r\n" - ], - [ - 3e-05, - "Total 12 (delta 9), reused 0 (delta 0)\r\n" - ], - [ - 0.054483, - "remote: Resolving deltas: 0% (0/9) \u001b[K\r" - ], - [ - 0.038253, - "remote: Resolving deltas: 22% (2/9) \u001b[K\rremote: Resolving deltas: 44% (4/9) \u001b[K\rremote: Resolving deltas: 55% (5/9) \u001b[K\rremote: Resolving deltas: 66% (6/9) \u001b[K\rremote: Resolving deltas: 77% (7/9) \u001b[K\rremote: Resolving deltas: 88% (8/9) \u001b[K\rremote: Resolving deltas: 100% (9/9) \u001b[K\rremote: Resolving deltas: 100% (9/9), completed with 5 local objects.\u001b[K\r\n" - ], - [ - 1.340424, - "To github.com:14rcole/cri-o\r\n + 72c6c49b...9f5954a6 kpod-test-refactor -> kpod-test-refactor" - ], - [ - 6.1e-05, - " (forced update)\r\n" - ], - [ - 0.00145, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.03447, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/kpod-test-refactor \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.001176, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000105, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 6.4e-05, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 6.2e-05, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m " - ], - [ - 4e-05, - "\u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000267, - 
"\u001b[?1h\u001b=" - ], - [ - 1.9e-05, - "\u001b[?2004h" - ], - [ - 0.606748, - "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 1186.178261, - "g" - ], - [ - 0.102859, - "\bgi" - ], - [ - 0.126056, - "t" - ], - [ - 0.086902, - " " - ], - [ - 0.763198, - "c" - ], - [ - 0.110654, - "h" - ], - [ - 0.062485, - "e" - ], - [ - 0.087974, - "c" - ], - [ - 0.045368, - "k" - ], - [ - 0.149436, - "o" - ], - [ - 0.070017, - "u" - ], - [ - 0.062669, - "t" - ], - [ - 0.109796, - " " - ], - [ - 0.164267, - "m" - ], - [ - 0.26327, - "aster\u001b[1m \u001b[0m" - ], - [ - 0.456115, - "\b\u001b[0m \b" - ], - [ - 2.9e-05, - "\u001b[?1l\u001b>" - ], - [ - 0.000106, - "\u001b[?2004l\r\r\n" - ], - [ - 0.005237, - "\u001b]2;git checkout master\u0007\u001b]1;git\u0007" - ], - [ - 0.070788, - "Switched to branch 'master'\r\n" - ], - [ - 0.000203, - "Your branch is up-to-date with 'origin/master'.\r\n" - ], - [ - 0.000797, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.074676, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.002717, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m \u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.32787, - "g" - ], - [ - 0.126467, - "\bgi" - ], - [ - 0.072925, - "t" - ], - [ - 0.076466, - " " - ], - [ - 0.102271, - "p" - ], - [ - 0.068608, - "u" - ], - [ - 0.213672, - "l" - ], - [ - 0.142438, - "l" - ], - [ - 0.102593, - " " - ], - [ - 0.126466, - "u" - ], - [ - 0.110638, - "p" - ], - [ - 0.175053, - "stream\u001b[1m \u001b[0m" - ], - [ - 0.338118, - "\b\u001b[0m m" - ], - [ - 0.11582, - "a" - ], - [ - 0.071538, - "s" - ], - [ - 0.203225, - "ter\u001b[1m:\u001b[0m" - ], - [ - 0.396247, - "\b\u001b[0m \b\u001b[?1l\u001b>\u001b[?2004l\r\r\n" - ], - [ - 0.003149, - "\u001b]2;git pull upstream master\u0007" - ], - [ - 0.000119, - "\u001b]1;git\u0007" - ], - [ - 1.611391, - "remote: Counting objects: 1, done.\u001b[K\r\n" - ], - [ - 0.000939, - "remote: Total 1 (delta 0), reused 1 (delta 0), pack-reused 0\u001b[K\r\n" - ], - [ - 0.00237, - "Unpacking objects: 100% (1/1) \r" - ], - [ - 9.3e-05, - "Unpacking objects: 100% (1/1), done.\r\n" - ], - [ - 0.096361, - "From github.com:kubernetes-incubator/cri-o" - ], - [ - 0.000156, - "\r\n" - ], - [ - 8.9e-05, - " * branch master -> FETCH_HEAD" - ], - [ - 7.3e-05, - "\r\n" - ], - [ - 0.000902, - " 6ca462a3..8c496a10 master -> upstream/master" - ], - [ - 0.000108, - "\r\n" - ], - [ - 0.013153, - "Updating 6ca462a3..8c496a10" - ], - [ - 0.000529, - "\r\n" - ], - [ - 0.035506, - "Fast-forward" - ], - [ - 5.4e-05, - "\r\n" - ], - [ - 0.006117, - " cmd/kpod/common.go | 8 \u001b[32m++\u001b[m" - ], - [ - 5.2e-05, - "\r\n" - ], - [ - 4.4e-05, - " cmd/kpod/formats/formats.go | 23 \u001b[32m+++\u001b[m\u001b[31m-\u001b[m" - ], - [ - 3.6e-05, - "\r\n" - ], - [ - 3.8e-05, - " cmd/kpod/formats/templates.go | 78 \u001b[32m++++++++++++\u001b[m" - ], - [ - 3.3e-05, - "\r\n" - ], - [ - 3.9e-05, - " cmd/kpod/images.go | 126 \u001b[32m+++++++++\u001b[m\u001b[31m----------\u001b[m" - ], - [ - 3.5e-05, - "\r\n" - ], - [ - 3.9e-05, - " vendor.conf | 1 \u001b[32m+\u001b[m" - ], - [ - 3.3e-05, - "\r\n" - ], - [ - 
4.4e-05, - " vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go | 934 \u001b[31m---------------------------------------------------------------------------------------------------------------------------------------\u001b[m" - ], - [ - 3.5e-05, - "\r\n" - ], - [ - 3.7e-05, - " vendor/github.com/containers/storage/pkg/archive/example_changes.go | 97 \u001b[31m--------------\u001b[m" - ], - [ - 3e-05, - "\r\n" - ], - [ - 3.5e-05, - " vendor/github.com/fatih/camelcase/LICENSE.md | 20 \u001b[32m+++\u001b[m" - ], - [ - 3e-05, - "\r\n" - ], - [ - 3.6e-05, - " vendor/github.com/fatih/camelcase/README.md | 58 \u001b[32m+++++++++\u001b[m" - ], - [ - 3e-05, - "\r\n" - ], - [ - 4.2e-05, - " vendor/github.com/fatih/camelcase/camelcase.go | 90 \u001b[32m+++++++++++++\u001b[m" - ], - [ - 3.3e-05, - "\r\n" - ], - [ - 3.8e-05, - " 10 files changed, 331 insertions(+), 1104 deletions(-)" - ], - [ - 3e-05, - "\r\n" - ], - [ - 3.8e-05, - " create mode 100644 cmd/kpod/formats/templates.go" - ], - [ - 2.8e-05, - "\r\n" - ], - [ - 3.5e-05, - " delete mode 100644 vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go" - ], - [ - 3.1e-05, - "\r\n" - ], - [ - 3.2e-05, - " delete mode 100644 vendor/github.com/containers/storage/pkg/archive/example_changes.go" - ], - [ - 2.6e-05, - "\r\n" - ], - [ - 3.1e-05, - " create mode 100644 vendor/github.com/fatih/camelcase/LICENSE.md" - ], - [ - 2.7e-05, - "\r\n" - ], - [ - 3.1e-05, - " create mode 100644 vendor/github.com/fatih/camelcase/README.md" - ], - [ - 2.7e-05, - "\r\n" - ], - [ - 3.3e-05, - " create mode 100644 vendor/github.com/fatih/camelcase/camelcase.go" - ], - [ - 2.7e-05, - "\r\n" - ], - [ - 0.000942, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.058415, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master \u001b[39m \u001b[33m\u001b[39m\r\n" - ], - [ - 0.00259, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.00032, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000152, - "\u001b]1;..cubator/cri-o\u0007" - ], - [ - 0.000223, - "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m " - ], - [ - 8.5e-05, - "\u001b[K\u001b[208C\u001b[90m\u001b[39m\u001b[208D" - ], - [ - 0.000154, - "\u001b[?1h\u001b=" - ], - [ - 0.000119, - "\u001b[?2004h" - ], - [ - 29.505715, - "v" - ], - [ - 0.127766, - "\bvi" - ], - [ - 0.118693, - " " - ], - [ - 0.402967, - "c" - ], - [ - 0.134732, - "m" - ], - [ - 0.171696, - "d\u001b[1m/\u001b[0m" - ], - [ - 0.082566, - "\b\u001b[0m/k" - ], - [ - 0.141218, - "pod\u001b[1m/\u001b[0m" - ], - [ - 0.41599, - "\b\u001b[0m/i" - ], - [ - 0.051368, - "m" - ], - [ - 0.088439, - "a" - ], - [ - 0.170485, - "ges.go\u001b[1m \u001b[0m" - ], - [ - 0.319745, - "\b\u001b[0m \b" - ], - [ - 7.2e-05, - "\u001b[?1l\u001b>" - ], - [ - 4.7e-05, - "\u001b[?2004l\r" - ], - [ - 0.000136, - "\r\n" - ], - [ - 0.003148, - "\u001b]2;vim cmd/kpod/images.go\u0007\u001b]1;vi\u0007" - ], - [ - 0.330583, - "\u001b[?2004h\u001b[?1049h\u001b[?1h\u001b=\u001b[?2004h" - ], - [ - 0.000904, - "\u001b[1;54r\u001b[?12;25h\u001b[?12l\u001b[?25h\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[54;1H\"cmd/kpod/images.go\"" - ], - [ - 8.4e-05, - " 203L, 4796C" - ], - [ - 0.019404, - "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c" - ], - [ - 0.012968, - "\u001b[1;1H\u001b[96m\u001b[47m 98 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 99 
[vim session: cmd/kpod/images.go, 203 lines, browsing roughly lines 93-148.
On screen: the images command parses an optional filter via
libkpodimage.ParseFilter(store, c.String("filter")) (wrapping failures as
"error parsing filter"), calls libkpodimage.GetImagesMatchingFilter(store,
params, name), and hands the result to outputImages(store, imageList,
truncate, digests, quiet, outputFormat, noheading).
genImagesFormat(quiet, truncate, digests bool) returns "{{.ID}}" when quiet,
otherwise builds a Go-template table format — a truncated
{{ .ID | printf "%-20.12s" }} or full-width %-64s ID column, then Name,
an optional Digest column, CreatedAt, and Size. outputImages iterates the
images, skipping duplicate IDs in quiet mode, and resolves each image's
name, digest, size, and creation time via
libkpodimage.InfoAndDigestAndSize(store, img). The recording ends
mid-session.]
"\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 92 \u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.035746, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 91 \u001b[m\u001b[93m\u001b[107m }\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  46%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.039133, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 90 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.New(\u001b[36m\"'kpod images' requires at most 1 argument\"\u001b[m\u001b[93m\u001b[107m)\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[4;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.033562, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[L\u001b[1;54r\u001b[1;1H\u001b[96m\u001b[47m 89 \u001b[m\u001b[93m\u001b[107m } \u001b[32melse\u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(c.Args()) > \u001b[36m1\u001b[m\u001b[93m\u001b[107m {\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  45%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1\u001b[4;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.423507, - "\u001b[?25l\u001b[54;1H\u001b[m\u001b[93m\u001b[107m/" - ], - [ - 8.6e-05, - "\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.351895, - "h\u001b[?25l" - ], - [ - 0.0106, - "\u001b[15;53H\u001b[7m\u001b[91mh\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[53;2H\u001b[1m\u001b[38;5;22m\u001b[48;5;148mCOMMND \u001b[m\u001b[93m\u001b[107m\u001b[186C\u001b[38;5;107m\u001b[48;5;240m  51%\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m103\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:49\r\n\u001b[m\u001b[93m\u001b[107m/h" - ], - [ - 6e-05, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.063442, - "e" - ], - [ - 0.000104, - "\u001b[?25l" - ], - [ - 0.012164, - "\u001b[15;53Hh\u001b[20;89H\u001b[7m\u001b[91mhe\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  53%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:85\r\n\u001b[m\u001b[93m\u001b[107m/he" - ], - [ - 0.000532, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.072256, - "a" - ], - [ - 7.1e-05, - "\u001b[?25l" - ], - [ - 0.00992, - "\u001b[20;91H\u001b[7m\u001b[91ma\u001b[54;5H" - ], - [ - 0.001459, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.162629, - "\u001b[27m\u001b[m\u001b[93m\u001b[107md" - ], - [ - 8.2e-05, - "\u001b[?25l" - ], - [ - 0.009968, - "\u001b[20;92H\u001b[7m\u001b[91md\u001b[54;6H" - ], - [ - 0.00011, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.147673, - "\u001b[27m\u001b[m\u001b[93m\u001b[107me\u001b[?25l" - ], - [ - 0.013264, - "\u001b[1;52r\u001b[1;1H\u001b[29M\u001b[1;54r\u001b[24;1H\u001b[96m\u001b[47m141 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m 
\u001b[32mlen\u001b[m\u001b[93m\u001b[107m(img.Names) > \u001b[36m0\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m142 \u001b[m\u001b[93m\u001b[107m\u001b[12Cname = img.Names[\u001b[36m0\u001b[m\u001b[93m\u001b[107m]\r\n\u001b[96m\u001b[47m143 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m144 \r\n145 \u001b[m\u001b[93m\u001b[107m\u001b[8Cinfo, imageDigest, size, _ := libkpodimage.InfoAndDigestAndSize(store, img)\r\n\u001b[96m\u001b[47m146 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m info != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m147 \u001b[m\u001b[93m\u001b[107m\u001b[12CcreatedTime = info.Created\r\n\u001b[96m\u001b[47m148 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m149 \r\n150 \u001b[m\u001b[93m\u001b[107m\u001b[8Cparams := imageOutputParams{\r\n\u001b[96m\u001b[47m151 \u001b[m\u001b[93m\u001b[107m\u001b[12CID:\u001b[8Cimg.ID,\r\n\u001b[96m\u001b[47m152 \u001b[m\u001b[93m\u001b[107m\u001b[12CName: name,\r\n\u001b[96m\u001b[47m153 \u001b[m\u001b[93m\u001b[107m\u001b[12CDigest: imageDigest,\r\n\u001b[96m\u001b[47m154 \u001b[m\u001b[93m\u001b[107m\u001b[12CCreatedAt: createdTime.Format(\u001b[36m\"Jan 2, 2006 15:04\"\u001b[m\u001b[93m\u001b[107m),\r\n\u001b[96m\u001b[47m155 \u001b[m\u001b[93m\u001b[107m\u001b[12CSize: libkpodimage.FormattedSize(size),\r\n\u001b[96m\u001b[47m156 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m157 \u001b[m\u001b[93m\u001b[10" - ], - [ - 6.2e-05, - "7m\u001b[8CimageOutput = \u001b[32mappend\u001b[m\u001b[93m\u001b[107m(imageOutput, params)\r\n\u001b[96m\u001b[47m158 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m159 \r\n160 \u001b[m\u001b[93m\u001b[107m \u001b[32mvar\u001b[m\u001b[93m\u001b[107m out formats.Writer\r\n\u001b[96m\u001b[47m161 \r\n162 \u001b[m\u001b[93m\u001b[107m \u001b[32mswitch\u001b[m\u001b[93m\u001b[107m outputFormat {\r\n\u001b[96m\u001b[47m163 \u001b[m\u001b[93m\u001b[107m \u001b[32mcase\u001b[m\u001b[93m\u001b[107m \u001b[36m\"json\"\u001b[m\u001b[93m\u001b[107m:\r\n\u001b[96m\u001b[47m164 \u001b[m\u001b[93m\u001b[107m\u001b[8Cout = formats.JSONstruct{Output: toGeneric(imageOutput)}\r\n\u001b[96m\u001b[47m165 \u001b[m\u001b[93m\u001b[107m \u001b[32mdefault\u001b[m\u001b[93m\u001b[107m:\r\n\u001b[96m\u001b[47m166 \u001b[m\u001b[93m\u001b[107m\u001b[8Cout = formats.StdoutTemplate{Output: toGeneric(imageOutput), Template: outputFormat, Fields: imageOutput[\u001b[36m0\u001b[m\u001b[93m\u001b[107m].\u001b[7m\u001b[91mheade\u001b[27m\u001b[m\u001b[93m\u001b[107mrMap()}\r\n\u001b[96m\u001b[47m167 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m168 \r\n169 \u001b[m\u001b[93m\u001b[107m formats.Writer(out).Out()\u001b[53;175H\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;181H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;189H\u001b[38;5;247m\u001b[48;5;236m" - ], - [ - 0.000184, - " go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;194H\u001b[38;5;144m\u001b[48;5;240m  82%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[53;201H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;203H 
\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1668\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;22m\u001b[48;5;252m:117\u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[K\u001b[54;1H/heade" - ], - [ - 6.2e-05, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.090014, - "r\u001b[?25l" - ], - [ - 0.010578, - "\u001b[49;126H\u001b[7m\u001b[91mr\u001b[54;8H\u001b[?12l\u001b[?25h" - ], - [ - 0.242781, - "\u001b[27m\u001b[m\u001b[93m\u001b[107mM" - ], - [ - 6.5e-05, - "\u001b[?25l" - ], - [ - 0.018322, - "\u001b[49;127H\u001b[7m\u001b[91mM\u001b[54;9H" - ], - [ - 0.000103, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.216526, - "\u001b[27m\u001b[m\u001b[93m\u001b[107ma\u001b[?25l" - ], - [ - 0.008316, - "\u001b[49;128H\u001b[7m\u001b[91ma\u001b[54;10H" - ], - [ - 0.000108, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.164608, - "\u001b[27m\u001b[m\u001b[93m\u001b[107mp\u001b[?25l" - ], - [ - 0.010964, - "\u001b[49;129H\u001b[7m\u001b[91mp\u001b[54;11H" - ], - [ - 5.8e-05, - "\u001b[?12l\u001b[?25h" - ], - [ - 0.303811, - "\r" - ], - [ - 5.4e-05, - "\u001b[?25l" - ], - [ - 0.01537, - "\u001b[27m\u001b[m\u001b[93m\u001b[107m\u001b[49;121H\u001b[7m\u001b[33mheaderMap\u001b[m\u001b[93m\u001b[107m\u001b[53;2H\u001b[1m\u001b[38;5;22m\u001b[48;5;148mNORMAL \u001b[49;121H\u001b[?12l\u001b[?25h" - ], - [ - 0.613627, - "\u001b[?25l\u001b[54;1H" - ], - [ - 0.009249, - "\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[1;1H\u001b[24M\u001b[1;54r\u001b[29;1H\u001b[96m\u001b[47m170 \r\n171 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m172 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m173 \r\n174 \u001b[m\u001b[93m\u001b[107m\u001b[32mtype\u001b[m\u001b[93m\u001b[107m imageOutputParams \u001b[32mstruct\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m175 \u001b[m\u001b[93m\u001b[107m ID\u001b[8C\u001b[33mstring\u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[36m`json:\"id\"`\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m176 \u001b[m\u001b[93m\u001b[107m Name \u001b[33mstring\u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[36m`json:\"names\"`\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m177 \u001b[m\u001b[93m\u001b[107m Digest digest.Digest \u001b[36m`json:\"digest\"`\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m178 \u001b[m\u001b[93m\u001b[107m CreatedAt \u001b[33mstring\u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[36m`json:\"created\"`\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m179 \u001b[m\u001b[93m\u001b[107m Size \u001b[33mstring\u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[36m`json:\"size\"`\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m180 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m181 \r\n182 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m toGeneric(params []imageOutputParams) []\u001b[32minterface\u001b[m\u001b[93m\u001b[107m{} {\r\n\u001b[96m\u001b[47m183 \u001b[m\u001b[93m\u001b[107m genericParams := \u001b[32mmake\u001b[m\u001b[93m\u001b[107m([]\u001b[32mi" - ], - [ - 4.4e-05, - "nterface\u001b[m\u001b[93m\u001b[107m{}, \u001b[32mlen\u001b[m\u001b[93m\u001b[107m(params))\r\n\u001b[96m\u001b[47m184 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m i, v := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m params {\r\n\u001b[96m\u001b[47m185 \u001b[m\u001b[93m\u001b[107m\u001b[8CgenericParams[i] = \u001b[32minterface\u001b[m\u001b[93m\u001b[107m{}(v)\r\n\u001b[96m\u001b[47m186 \u001b[m\u001b[93m\u001b[107m 
}\r\n\u001b[96m\u001b[47m187 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m genericParams\r\n\u001b[96m\u001b[47m188 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m189 \r\n190 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (i *imageOutputParams) \u001b[7m\u001b[33mheaderMap\u001b[m\u001b[93m\u001b[107m() \u001b[33mmap\u001b[m\u001b[93m\u001b[107m[\u001b[33mstring\u001b[m\u001b[93m\u001b[107m]\u001b[33mstring\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m191 \u001b[m\u001b[93m\u001b[107m v := reflect.Indirect(reflect.ValueOf(i))\r\n\u001b[96m\u001b[47m192 \u001b[m\u001b[93m\u001b[107m values := \u001b[32mmake\u001b[m\u001b[93m\u001b[107m(\u001b[33mmap\u001b[m\u001b[93m\u001b[107m[\u001b[33mstring\u001b[m\u001b[93m\u001b[107m]\u001b[33mstring\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m193 \u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[K\u001b[53;175H\u001b[38;5;231m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b" - ], - [ - 2.6e-05, - "[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  94%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[53;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[53;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\b190\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 2.596474, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m194 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m i := \u001b[36m0\u001b[m\u001b[93m\u001b[107m; i < v.NumField(); i++ {\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.204651, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m195 \u001b[m\u001b[93m\u001b[107m\u001b[8Ckey := v.Type().Field(i).Name\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  95%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.176184, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m196 \u001b[m\u001b[93m\u001b[107m\u001b[8Cvalue := key\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1 \u001b[49;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.187364, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m197 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m value == \u001b[36m\"ID\"\u001b[m\u001b[93m\u001b[107m || value == \u001b[36m\"Name\"\u001b[m\u001b[93m\u001b[107m {\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  
96%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.159818, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m198 \u001b[m\u001b[93m\u001b[107m\u001b[12Cvalue = \u001b[36m\"Image\"\u001b[m\u001b[93m\u001b[107m + value\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.510264, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m199 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  97%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:20\u001b[49;24H\u001b[?12l\u001b[?25h" - ], - [ - 0.025799, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m200 \u001b[m\u001b[93m\u001b[107m\u001b[8Cvalues[key] = fmt.Sprintf(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[31m%s\u001b[m\u001b[93m\u001b[107m\u001b[36m \"\u001b[m\u001b[93m\u001b[107m, strings.ToUpper(splitCamelCase(value)))\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.03976, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[52;1H\u001b[96m\u001b[47m201 \u001b[m\u001b[93m\u001b[107m }\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  98%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.024833, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[47;49H\u001b[1m\u001b[31m\u001b[106m{\u001b[49;13H}\u001b[m\u001b[93m\u001b[107m\r\n\r\n\r\n\u001b[96m\u001b[47m202 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m values\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9 \u001b[49;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.052706, - "\u001b[?25l\u001b[1;52r\u001b[m\u001b[93m\u001b[107m\u001b[52;1H\r\n\u001b[1;54r\u001b[46;49H{\u001b[48;13H}\u001b[52;1H\u001b[96m\u001b[47m203 \u001b[m\u001b[93m\u001b[107m}\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  99%\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m200\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.031789, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;43H\u001b[1m\u001b[31m\u001b[106m{\u001b[50;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5 \u001b[50;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.028149, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;43H{\u001b[50;9H}\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m 100%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[51;21H\u001b[?12l\u001b[?25h" - ], - [ - 0.030014, - 
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[39;63H\u001b[1m\u001b[31m\u001b[106m{\u001b[52;5H}\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1 \u001b[52;5H\u001b[?12l\u001b[?25h" - ], - [ - 0.018881, - "\u001b[?5h" - ], - [ - 0.000174, - "\u001b[?2004l" - ], - [ - 0.100098, - "\u001b[?2004h" - ], - [ - 0.031161, - "\u001b[?5l" - ], - [ - 0.340032, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[39;63H{\u001b[52;5H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:17\u001b[51;21H\u001b[?12l\u001b[?25h" - ], - [ - 47.126026, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;43H\u001b[1m\u001b[31m\u001b[106m{\u001b[50;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  99%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5 \u001b[50;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.49885, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[43;43H{\u001b[50;9H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m0\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[49;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.03165, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[46;49H\u001b[1m\u001b[31m\u001b[106m{\u001b[48;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  98%\u001b[m\u001b[93m\u001b[107m\u001b[5C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m199\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9 \u001b[48;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.030139, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[46;49H{\u001b[48;13H}\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[47;33H\u001b[?12l\u001b[?25h" - ], - [ - 0.029042, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  97%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[46;33H" - ], - [ - 0.029773, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:20\u001b[45;24H" - ], - [ - 0.031993, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  96%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[44;33H" - ], - [ - 0.03237, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[43;33H" - ], - [ - 0.195643, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  95%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:1 \u001b[42;5H" - ], - [ - 0.210797, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;207H\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:29\u001b[41;33H" - ], - [ - 0.517507, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;247m\u001b[48;5;240m  94%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[40;33H" - ], - [ - 
0.244061, - "\u001b[53;210H8\u001b[40;32H" - ], - [ - 0.216928, - "\u001b[53;210H7\u001b[40;31H" - ], - [ - 0.181163, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\b\u001b[1m\u001b[31m\u001b[106m(\u001b[18C)\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m6\u001b[40;30H\u001b[?12l\u001b[?25h" - ], - [ - 315.572339, - "\u001b[?25l\u001b[54;1H\u001b[m\u001b[93m\u001b[107m:" - ], - [ - 4.9e-05, - "\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.220904, - "\b" - ], - [ - 0.002635, - "\u001b[?25l\u001b[40;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.335899, - "\u001b[?25l" - ], - [ - 0.0299, - "\u001b[54;1H\u001b[34m-- VISUAL --\u001b[m\u001b[93m\u001b[107m\u001b[53;1H\u001b[1m\u001b[38;5;94m\u001b[48;5;214m VISUAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[m\u001b[93m\u001b[107m\u001b[53;9H\u001b[38;5;214m\u001b[48;5;94m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;214m\u001b[48;5;94m↔\u001b[53;12H1 \u001b[m\u001b[93m\u001b[107m\u001b[38;5;94m\u001b[48;5;240m\u001b[53;15H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[53;17H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[53;26H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240ms\bimages.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[53;47H \u001b[40;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.729109, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[54;1H\u001b[K\u001b[54;1H:\u001b[?2004h'<,'>\u001b[?12l\u001b[?25h" - ], - [ - 0.225295, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.582167, - "\u001b[?25l\u001b[54;1H\u001b[K" - ], - [ - 0.007677, - "\u001b[53;1H\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;214m\u001b[48;5;94m\u001b[m\u001b[93m\u001b[107m\u001b[53;9H\u001b[38;5;148m\u001b[48;5;240m\u001b[53;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[53;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[53;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpo\u001b[m\u001b[93m\u001b[107m\u001b[2C\u001b[1m\u001b[38;5;231m\u001b[48;5;240mimages.go s\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;240m\u001b[48;5;236m\u001b[53;42H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mg\u001b[m\u001b[93m\u001b[107m\b\u001b[38;5;231m\u001b[48;5;236m     \u001b[40;30H\u001b[?12l\u001b[?25h" - ], - [ - 0.719145, - "\u001b[?25l\u001b[54;1H\u001b[m\u001b[93m\u001b[107m:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.136951, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.082665, - "\r" - ], - [ - 0.02936, - "\u001b[?25l" - ], - [ - 0.000147, - "\u001b[?2004l" - ], - [ - 9.5e-05, - "\u001b[54;1H\u001b[K\u001b[54;1H" - ], - [ - 5.5e-05, - "\u001b[?2004l\u001b[?1l\u001b>" - ], - [ - 6.2e-05, - "\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.003739, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.062587, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master \u001b[39m \u001b[33m422s\u001b[39m\r\n" - ], - [ - 0.003799, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007\u001b]1;..cubator/cri-o\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[J\u001b[35m❯\u001b[39m 
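Most of the images.go code shown in those frames is recoverable. The following is a minimal, self-contained sketch of the header-derivation logic on display; it is a reconstruction from the recording, not the committed file. Two assumptions are flagged inline: the Digest field (a digest.Digest in the recording) is simplified to string, and splitCamelCase is referenced but never shown, so its body here is a guess at the intended behavior.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
	"unicode"
)

// imageOutputParams mirrors the struct visible in the recording; the
// Digest field is a digest.Digest there, simplified to string here.
type imageOutputParams struct {
	ID        string `json:"id"`
	Name      string `json:"names"`
	Digest    string `json:"digest"`
	CreatedAt string `json:"created"`
	Size      string `json:"size"`
}

// toGeneric widens the typed slice to []interface{} so the generic
// output writers can consume it.
func toGeneric(params []imageOutputParams) []interface{} {
	genericParams := make([]interface{}, len(params))
	for i, v := range params {
		genericParams[i] = interface{}(v)
	}
	return genericParams
}

// headerMap derives table headers from field names via reflection:
// ID and Name are prefixed with "Image", camel case is split into
// words, and the result upper-cased ("CreatedAt" -> "CREATED AT").
func (i *imageOutputParams) headerMap() map[string]string {
	v := reflect.Indirect(reflect.ValueOf(i))
	values := make(map[string]string)
	for n := 0; n < v.NumField(); n++ {
		key := v.Type().Field(n).Name
		value := key
		if value == "ID" || value == "Name" {
			value = "Image" + value
		}
		values[key] = fmt.Sprintf("%s ", strings.ToUpper(splitCamelCase(value)))
	}
	return values
}

// splitCamelCase is referenced but not shown in the frames; this
// stand-in inserts a space before each upper-case letter that
// follows a lower-case one ("ImageID" -> "Image ID").
func splitCamelCase(s string) string {
	var b strings.Builder
	for idx, r := range s {
		if idx > 0 && unicode.IsUpper(r) && unicode.IsLower(rune(s[idx-1])) {
			b.WriteByte(' ')
		}
		b.WriteRune(r)
	}
	return b.String()
}

func main() {
	imgs := toGeneric([]imageOutputParams{{ID: "e21c333399e0", Name: "alpine"}})
	fmt.Println(len(imgs), (&imageOutputParams{}).headerMap())
}
```

In the recording, images.go wires these together at the end of the command: headerMap() supplies the Fields for a table header while toGeneric() feeds the row data to whichever writer the output-format switch selects.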
[asciinema recording frames, continued: `vi cmd/kpod/formats/formats.go` (69L, 1356C). Visible content: the Writer interface, the JSONstruct and StdoutTemplate types, Out() for JSON via json.MarshalIndent, and Out() for Go templates, including the "table"-prefix header pass that executes a header template against t.Fields before rendering each output item.]
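The formats.go side is likewise legible in the frames. The sketch below reconstructs the Writer pattern it shows, with two hedges: the basicFunctions and headerFunctions template FuncMaps the original passes to template.New(...).Funcs(...) never appear on screen and are omitted, and the main() demo is illustrative only.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"
	"text/template"
)

// Writer is the one interface both output modes satisfy, so callers
// can select JSON or template output with a single switch.
type Writer interface {
	Out() error
}

// JSONstruct prints the output slice as indented JSON.
type JSONstruct struct {
	Output []interface{}
}

func (j JSONstruct) Out() error {
	data, err := json.MarshalIndent(j.Output, "", "    ")
	if err != nil {
		return err
	}
	fmt.Printf("%s\n", data)
	return nil
}

// StdoutTemplate renders every item through a Go text/template. A
// template starting with "table" first prints a header row from the
// Fields map, mirroring docker's --format "table ..." convention.
type StdoutTemplate struct {
	Output   []interface{}
	Template string
	Fields   map[string]string
}

func (t StdoutTemplate) Out() error {
	if strings.HasPrefix(t.Template, "table") {
		t.Template = strings.TrimSpace(t.Template[len("table"):])
		headerTmpl, err := template.New("header").Parse(t.Template)
		if err != nil {
			return err
		}
		if err := headerTmpl.Execute(os.Stdout, t.Fields); err != nil {
			return err
		}
		fmt.Println()
	}
	tmpl, err := template.New("image").Parse(t.Template)
	if err != nil {
		return err
	}
	for _, img := range t.Output {
		if err := tmpl.Execute(os.Stdout, img); err != nil {
			return err
		}
		fmt.Println()
	}
	return nil
}

func main() {
	type row struct{ ID, Name, Size string }
	images := []interface{}{row{"e21c333399e0", "docker.io/library/alpine:latest", "4.15MB"}}
	headers := map[string]string{"ID": "IMAGE ID", "Name": "IMAGE NAME", "Size": "SIZE"}

	var out Writer = StdoutTemplate{
		Output:   images,
		Template: "table {{.ID}}\t{{.Name}}\t{{.Size}}",
		Fields:   headers,
	}
	if err := out.Out(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```

The design point visible in the recording is that the caller only decides which Writer to construct; JSON and Go-template output then share the single Out() entry point.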
"\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[42C\u001b[1m\u001b[31m\u001b[106m{\u001b[36;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;209H\u001b[38;5;22m\u001b[48;5;252m47\u001b[25;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.862743, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[36;9H}\u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m6\u001b[25;50H\u001b[?12l\u001b[?25h" - ], - [ - 0.211622, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;29H\u001b[1m\u001b[31m\u001b[106m(\u001b[19C)\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m5\u001b[25;49H\u001b[?12l\u001b[?25h" - ], - [ - 0.334139, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;29H(t\u001b[18C) \u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m6\u001b[25;50H\u001b[?12l\u001b[?25h" - ], - [ - 0.690098, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m \u001b[1m\u001b[31m\u001b[106m{\u001b[36;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;210H\u001b[38;5;22m\u001b[48;5;252m7\u001b[25;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.829404, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[36;9H}\u001b[53;195H\u001b[38;5;107m\u001b[48;5;240m  62%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[26;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.187135, - "\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  64%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:\u001b[27;51H" - ], - [ - 0.223449, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[28;27H\u001b[1m\u001b[31m\u001b[106m{\u001b[30;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  65%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m5\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:23\u001b[28;27H\u001b[?12l\u001b[?25h" - ], - [ - 0.381099, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[30;13H}\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  67%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m6\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:47\u001b[29;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.445137, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[28;27H\u001b[1m\u001b[31m\u001b[106m{\u001b[30;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  68%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m7\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9 \u001b[30;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.207385, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[28;27H{\u001b[30;13H}\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  70%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:47\u001b[31;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.222319, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[32;27H\u001b[1m\u001b[31m\u001b[106m{\u001b[34;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  71%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m9\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:23\u001b[32;27H\u001b[?12l\u001b[?25h" - ], - [ - 0.499981, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m{\u001b[34;13H}\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  
72%\u001b[m\u001b[93m\u001b[107m\u001b[6C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m50\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:22\u001b[33;26H\u001b[?12l\u001b[?25h" - ], - [ - 0.029604, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[32;27H\u001b[1m\u001b[31m\u001b[106m{\u001b[34;13H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  74%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m1\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:9 \u001b[34;13H\u001b[?12l\u001b[?25h" - ], - [ - 0.162284, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[32;27H{\u001b[34;13H}\u001b[35;24H\u001b[1m\u001b[31m\u001b[106m()\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  75%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m2\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:21\u001b[35;25H\u001b[?12l\u001b[?25h" - ], - [ - 0.200133, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;51H\u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\u001b[35;24H()\u001b[36;9H\u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  77%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5 \u001b[36;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.196823, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;51H{\u001b[36;9H}\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  78%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m4\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:47\u001b[37;51H\u001b[?12l\u001b[?25h" - ], - [ - 0.356692, - "\u001b[?25l\u001b[m\u001b[93m\u001b[107m\u001b[25;51H\u001b[1m\u001b[31m\u001b[106m{\u001b[36;9H}\u001b[m\u001b[93m\u001b[107m\u001b[53;195H\u001b[38;5;108m\u001b[48;5;240m  77%\u001b[m\u001b[93m\u001b[107m\u001b[7C\u001b[1m\u001b[38;5;235m\u001b[48;5;252m3\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5 \u001b[36;9H\u001b[?12l\u001b[?25h" - ], - [ - 837.577075, - "\u001b[27m\u001b[23m\u001b[m\u001b[93m\u001b[107m\u001b[H\u001b[2J\u001b[?25l\u001b[1;1H\u001b[96m\u001b[47m 19 \u001b[m\u001b[93m\u001b[107m\u001b[32mtype\u001b[m\u001b[93m\u001b[107m JSONstruct \u001b[32mstruct\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 20 \u001b[m\u001b[93m\u001b[107m Output []\u001b[32minterface\u001b[m\u001b[93m\u001b[107m{}\r\n\u001b[96m\u001b[47m 21 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 22 \r\n 23 \u001b[m\u001b[93m\u001b[107m\u001b[96m// StdoutTemplate for Go template output\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 24 \u001b[m\u001b[93m\u001b[107m\u001b[32mtype\u001b[m\u001b[93m\u001b[107m StdoutTemplate \u001b[32mstruct\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 25 \u001b[m\u001b[93m\u001b[107m Output []\u001b[32minterface\u001b[m\u001b[93m\u001b[107m{}\r\n\u001b[96m\u001b[47m 26 \u001b[m\u001b[93m\u001b[107m Template \u001b[33mstring\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 27 \u001b[m\u001b[93m\u001b[107m Fields \u001b[33mmap\u001b[m\u001b[93m\u001b[107m[\u001b[33mstring\u001b[m\u001b[93m\u001b[107m]\u001b[33mstring\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 28 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 29 \r\n 30 \u001b[m\u001b[93m\u001b[107m\u001b[96m// Out method for JSON\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 31 
\u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (j JSONstruct) Out() \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 32 \u001b[m\u001b[93m\u001b[107m data, err := json.MarshalIndent(j.Output, \u001b[36m\"\"\u001b[m\u001b[93m\u001b[107m, \u001b[36m\" \"\u001b[m\u001b[93m\u001b[107m)\r\n" - ], - [ - 7.7e-05, - "\u001b[96m\u001b[47m 33 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 34 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 35 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 36 \u001b[m\u001b[93m\u001b[107m fmt.Printf(\u001b[36m\"\u001b[m\u001b[93m\u001b[107m\u001b[31m%s\\n\u001b[m\u001b[93m\u001b[107m\u001b[36m\"\u001b[m\u001b[93m\u001b[107m, data)\r\n\u001b[96m\u001b[47m 37 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 38 \u001b[m\u001b[93m\u001b[107m}\r\n\u001b[96m\u001b[47m 39 \r\n 40 \u001b[m\u001b[93m\u001b[107m\u001b[96m// Out method for Go templates\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 41 \u001b[m\u001b[93m\u001b[107m\u001b[32mfunc\u001b[m\u001b[93m\u001b[107m (t StdoutTemplate) Out() \u001b[33merror\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 42 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m strings.HasPrefix(t.Template, \u001b[36m\"table\"\u001b[m\u001b[93m\u001b[107m) \u001b[1m\u001b[31m\u001b[106m{\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 43 \u001b[m\u001b[93m\u001b[107m\u001b[8Ct.Template = strings.TrimSpace(t.Template[\u001b[36m5\u001b[m\u001b[93m\u001b[107m:])\r\n\u001b[96m\u001b[47m 44 \u001b[m\u001b[93m\u001b[107m\u001b[8CheaderTmpl, err := template.New(\u001b[36m\"header\"\u001b[m\u001b[93m\u001b[107m).Funcs(headerFunctions).Parse(t.Template)\r\n\u001b[96m\u001b[47m 45 \u001b[m\u001b[93m\u001b[107m" - ], - [ - 0.006486, - "\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 46 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Template parsing error\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 47 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 48 \u001b[m\u001b[93m\u001b[107m\u001b[8Cerr = headerTmpl.Execute(os.Stdout, t.Fields)\r\n\u001b[96m\u001b[47m 49 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 50 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 51 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 52 \u001b[m\u001b[93m\u001b[107m\u001b[8Cfmt.Println()\r\n\u001b[96m\u001b[47m 53 \u001b[m\u001b[93m\u001b[107m \u001b[1m\u001b[31m\u001b[106m}\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 54 \u001b[m\u001b[93m\u001b[107m tmpl, err := template.New(\u001b[36m\"image\"\u001b[m\u001b[93m\u001b[107m).Funcs(basicFunctions).Parse(t.Template)\r\n\u001b[96m\u001b[47m 55 \u001b[m\u001b[93m\u001b[107m \u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 56 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m errors.Wrapf(err, \u001b[36m\"Template parsing 
error\"\u001b[m\u001b[93m\u001b[107m)\r\n\u001b[96m\u001b[47m 57 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 58 \r\n 59 \u001b[m\u001b[93m\u001b[107m \u001b[32mfor\u001b[m\u001b[93m\u001b[107m" - ], - [ - 6.6e-05, - " _, img := \u001b[32mrange\u001b[m\u001b[93m\u001b[107m t.Output {\r\n\u001b[96m\u001b[47m 60 \u001b[m\u001b[93m\u001b[107m\u001b[8CbasicTmpl := tmpl.Funcs(basicFunctions)\r\n\u001b[96m\u001b[47m 61 \u001b[m\u001b[93m\u001b[107m\u001b[8Cerr = basicTmpl.Execute(os.Stdout, img)\r\n\u001b[96m\u001b[47m 62 \u001b[m\u001b[93m\u001b[107m\u001b[8C\u001b[32mif\u001b[m\u001b[93m\u001b[107m err != \u001b[36mnil\u001b[m\u001b[93m\u001b[107m {\r\n\u001b[96m\u001b[47m 63 \u001b[m\u001b[93m\u001b[107m\u001b[12C\u001b[32mreturn\u001b[m\u001b[93m\u001b[107m err\r\n\u001b[96m\u001b[47m 64 \u001b[m\u001b[93m\u001b[107m\u001b[8C}\r\n\u001b[96m\u001b[47m 65 \u001b[m\u001b[93m\u001b[107m\u001b[8Cfmt.Println()\r\n\u001b[96m\u001b[47m 66 \u001b[m\u001b[93m\u001b[107m }\r\n\u001b[96m\u001b[47m 67 \u001b[m\u001b[93m\u001b[107m \u001b[32mreturn\u001b[m\u001b[93m\u001b[107m \u001b[36mnil\u001b[m\u001b[93m\u001b[107m\r\n\u001b[96m\u001b[47m 68 \u001b[m\u001b[93m\u001b[107m\r\n\u001b[1m\u001b[38;5;22m\u001b[48;5;148m NORMAL \u001b[m\u001b[93m\u001b[107m\u001b[38;5;148m\u001b[48;5;240m\u001b[51;10H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240m\u001b[51;12H master \u001b[m\u001b[93m\u001b[107m\u001b[38;5;245m\u001b[48;5;240m\u001b[51;21H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;250m\u001b[48;5;240mcmd/kpod/formats/\u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;231m\u001b[48;5;240mformats.go \u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m\u001b[51;51H \u001b[m\u001b[93m\u001b[107m\u001b[38;5;231m\u001b[48;5;236m                                   " - ], - [ - 2.6e-05, - "                                                                                         \u001b[m\u001b[93m\u001b[107m\u001b[38;5;247m\u001b[48;5;236munix\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;182H\u001b[38;5;247m\u001b[48;5;236m utf-8\u001b[m\u001b[93m\u001b[107m\u001b[38;5;244m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;190H\u001b[38;5;247m\u001b[48;5;236m go\u001b[m\u001b[93m\u001b[107m\u001b[38;5;240m\u001b[48;5;236m \u001b[m\u001b[93m\u001b[107m\u001b[51;195H\u001b[38;5;108m\u001b[48;5;240m  77%\u001b[m\u001b[93m\u001b[107m\u001b[38;5;252m\u001b[48;5;240m \u001b[m\u001b[93m\u001b[107m\u001b[51;202H\u001b[38;5;235m\u001b[48;5;252m \u001b[51;204H \u001b[m\u001b[93m\u001b[107m\u001b[1m\u001b[38;5;235m\u001b[48;5;252m 53\u001b[m\u001b[93m\u001b[107m\u001b[38;5;22m\u001b[48;5;252m:5  \u001b[35;9H\u001b[?12l\u001b[?25h" - ], - [ - 0.84171, - "\u001b[?25l\u001b[52;1H\u001b[m\u001b[93m\u001b[107m:\u001b[?2004h\u001b[?12l\u001b[?25h" - ], - [ - 0.167691, - "q\u001b[?25l\u001b[?12l\u001b[?25h" - ], - [ - 0.064539, - "\r" - ], - [ - 0.016979, - "\u001b[?25l\u001b[?2004l\u001b[52;1H\u001b[K\u001b[52;1H\u001b[?2004l\u001b[?1l\u001b>\u001b[?12l\u001b[?25h\u001b[?1049l" - ], - [ - 0.002329, - "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" - ], - [ - 0.02599, - "\r\n\u001b[34m\u001b[01mcri-o\u001b[22m \u001b[90mgit/master \u001b[39m \u001b[33m853s\u001b[39m\r\n" - ], - [ - 0.001018, - "\u001b]7;file://localhost.localdomain/home/ryan/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 0.000113, - "\u001b]2;ryan@localhost: ~/Development/Go/src/github.com/kubernetes-incubator/cri-o\u0007" - ], - [ - 
- ] - ] -} \ No newline at end of file
diff --git a/kubernetes.md b/kubernetes.md index c2aa8bf8..e95694ac 100644 --- a/kubernetes.md +++ b/kubernetes.md @@ -1,6 +1,6 @@
-# Running cri-o on kubernetes cluster
+# Running CRI-O on kubernetes cluster
-## Switching runtime from docker to cri-o
+## Switching runtime from docker to CRI-O
In a standard docker-based kubernetes cluster, kubelet runs on each node as a systemd service and takes care of communication between the runtime and the API server. It is responsible for starting microservice pods (such as `kube-proxy`, `kubedns`, etc.; they can differ across ways of deploying k8s) and user pods. Configuration of kubelet determines which runtime is used and in what way. Kubelet itself is executed in a docker container (as we can see in `kubelet.service`), but, importantly, **it's not** a kubernetes pod (at least for now), so we can keep kubelet running inside a container (as well as directly on the host) and, regardless of this, run pods in the chosen runtime.
-Below, you can find an instruction how to switch one or more nodes on running kubernetes cluster from docker to cri-o.
+Below are instructions for switching one or more nodes of a running kubernetes cluster from docker to CRI-O.
### Preparing crio
-You must prepare and install `crio` on each node you would like to switch. Here's the list of files that must be provided:
+You must prepare and install `crio` on each node you would like to switch.
+Besides the files installed by `make install install.config`, here's the list of files that must be provided:
-| File path | Description | Location |
-|--------------------------------------------|----------------------------|-----------------------------------------------------|
-| `/etc/crio/crio.conf` | crio configuration | Generated on cri-o `make install` |
-| `/etc/crio/seccomp.conf` | seccomp config | Example stored in cri-o repository |
-| `/etc/containers/policy.json` | containers policy | Example stored in cri-o repository |
-| `/bin/{crio, runc}` | `crio` and `runc` binaries | Built from cri-o repository |
-| `/usr/local/libexec/crio/conmon` | `conmon` binary | Built from cri-o repository |
-| `/opt/cni/bin/{flannel, bridge,...}` | CNI plugins binaries | Can be built from sources `containernetworking/cni` |
-| `/etc/cni/net.d/10-mynet.conf` | Network config | Example stored in [README file](README.md) |
+| File path | Description | Location |
+|--------------------------------------------|-----------------------------|---------------------------------------------------------|
+| `/etc/containers/policy.json` | containers policy | [Example](test/policy.json) stored in cri-o repository |
+| `/bin/runc` | `runc` or other OCI runtime | Can be built from source at `opencontainers/runc` |
+| `/opt/cni/bin/{flannel, bridge,...}` | CNI plugin binaries | Can be built from source at `containernetworking/plugins` |
+| `/etc/cni/net.d/...` | CNI network config | Example [here](contrib/cni) |
The `crio` binary can be executed directly on the host or inside a container; however, the recommended way is to run it as a systemd service. @@ -79,7 +77,7 @@ KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests You need to add the following parameters to `KUBELET_ARGS`:
* `--experimental-cri=true` - Use the Container Runtime Interface. Will be true by default from the kubernetes 1.6 release.
* `--container-runtime=remote` - Use a remote runtime with the provided socket.
-* `--container-runtime-endpoint=/var/run/crio.sock` - Socket for remote runtime (default `crio` socket localization).
+* `--container-runtime-endpoint=/var/run/crio/crio.sock` - Socket for the remote runtime (the default `crio` socket location; a connectivity sketch follows below).
* `--runtime-request-timeout=10m` - Optional but useful. Some requests, especially pulling huge images, may take longer than the default (2 minutes) and will cause an error.
Kubelet is prepared now. @@ -95,7 +93,7 @@ If your cluster is using flannel network, your network configuration should be l ``` Then, kubelet will take parameters from `/run/flannel/subnet.env`, a file generated by the flannel kubelet microservice.
-## Starting kubelet with cri-o
+## Starting kubelet with CRI-O
Start crio first, then kubelet.
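A note on the endpoint flag above: kubelet's "remote runtime" is plain gRPC over a unix domain socket, so reachability of `/var/run/crio/crio.sock` can be checked the same way. Below is a minimal, illustrative sketch using the gRPC dial options of this era (`WithDialer`, `WithTimeout`); it is not CRI-O or kubelet code, just an assumption-level probe:

```go
package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
)

func main() {
	const endpoint = "/var/run/crio/crio.sock" // matches --container-runtime-endpoint

	// Dial the CRI socket the way kubelet's remote runtime does: gRPC over
	// a unix domain socket, blocking until connected or timing out.
	conn, err := grpc.Dial(endpoint,
		grpc.WithInsecure(), // local unix socket, no TLS
		grpc.WithBlock(),
		grpc.WithTimeout(10*time.Second),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}),
	)
	if err != nil {
		log.Fatalf("CRI endpoint %s not reachable: %v", endpoint, err)
	}
	defer conn.Close()
	log.Printf("connected to %s", endpoint)
}
```

A timeout here usually just means crio has not been started yet, which is why crio must come up before kubelet.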
If you created `crio` service: ``` # systemctl start crio
diff --git a/libkpod/config.go b/lib/config.go similarity index 96% rename from libkpod/config.go rename to lib/config.go index 123bece8..6a63b2b0 100644 --- a/libkpod/config.go +++ b/lib/config.go @@ -1,4 +1,4 @@ -package libkpod +package lib import ( "bytes" @@ -83,7 +83,7 @@ type RootConfig struct { LogDir string `toml:"log_dir"` // FileLocking specifies whether to use file-based or in-memory locking - // File-based locking is required when multiple users of libkpod are + // File-based locking is required when multiple users of lib are // present on the same system FileLocking bool `toml:"file_locking"` } @@ -121,6 +121,9 @@ type RuntimeConfig struct { // NoPivot instructs the runtime to not use `pivot_root`, but instead use `MS_MOVE` NoPivot bool `toml:"no_pivot"` + // EnableSharedPIDNamespace instructs the runtime to enable a shared PID namespace for the containers in a pod + EnableSharedPIDNamespace bool `toml:"enable_shared_pid_namespace"` + // Conmon is the path to conmon binary, used for managing the runtime. Conmon string `toml:"conmon"` @@ -145,6 +148,10 @@ type RuntimeConfig struct { // HooksDirPath location of oci hooks config files HooksDirPath string `toml:"hooks_dir_path"` + // DefaultMounts is the list of mounts to be mounted for each container; + // the format of each mount is "host-path:container-path" + DefaultMounts []string `toml:"default_mounts"` + // Hooks List of hooks to run with container Hooks map[string]HookParams
diff --git a/libkpod/config_test.go b/lib/config_test.go similarity index 98% rename from libkpod/config_test.go rename to lib/config_test.go index e6820d3c..59998382 100644 --- a/libkpod/config_test.go +++ b/lib/config_test.go @@ -1,4 +1,4 @@ -package libkpod +package lib import ( "io/ioutil"
diff --git a/libkpod/container.go b/lib/container.go similarity index 98% rename from libkpod/container.go rename to lib/container.go index 7835952d..8264ab6a 100644 --- a/libkpod/container.go +++ b/lib/container.go @@ -1,10 +1,10 @@ -package libkpod +package lib import ( "fmt" cstorage "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/libkpod/sandbox" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" "github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/pkg/registrar" "github.com/pkg/errors"
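The two `RuntimeConfig` keys added in `lib/config.go` above are ordinary TOML. As a hedged illustration of how they round-trip through `github.com/BurntSushi/toml` (the decoder CRI-O's config loading uses), here is a sketch; the struct below is a pared-down, hypothetical stand-in for the real `RuntimeConfig`, decoded as a flat document rather than under a `[crio.runtime]` table:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// runtimeConfig mimics only the two new fields; not the real lib.RuntimeConfig.
type runtimeConfig struct {
	EnableSharedPIDNamespace bool     `toml:"enable_shared_pid_namespace"`
	DefaultMounts            []string `toml:"default_mounts"`
}

func main() {
	// default_mounts entries follow the "host-path:container-path" format
	// described in the field's comment.
	const conf = `
enable_shared_pid_namespace = true
default_mounts = ["/usr/share/secrets:/run/secrets"]
`
	var rc runtimeConfig
	if _, err := toml.Decode(conf, &rc); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", rc)
	// {EnableSharedPIDNamespace:true DefaultMounts:[/usr/share/secrets:/run/secrets]}
}
```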
diff --git a/libkpod/container_server.go b/lib/container_server.go similarity index 94% rename from libkpod/container_server.go rename to lib/container_server.go index 1bc94871..9a4704b7 100644 --- a/libkpod/container_server.go +++ b/lib/container_server.go @@ -1,4 +1,4 @@ -package libkpod +package lib import ( "encoding/json" @@ -12,13 +12,14 @@ import ( cstorage "github.com/containers/storage" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/truncindex" - "github.com/kubernetes-incubator/cri-o/libkpod/sandbox" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" "github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/pkg/annotations" "github.com/kubernetes-incubator/cri-o/pkg/registrar" "github.com/kubernetes-incubator/cri-o/pkg/storage" "github.com/opencontainers/runc/libcontainer" rspec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -167,7 +168,8 @@ func New(config *Config) (*ContainerServer, error) { state: &containerServerState{ containers: oci.NewMemoryStore(), infraContainers: oci.NewMemoryStore(), - sandboxes: make(map[string]*sandbox.Sandbox), + sandboxes: sandbox.NewMemoryStore(), + processLevels: make(map[string]int), }, config: config, }, nil @@ -323,6 +325,8 @@ func (c *ContainerServer) LoadSandbox(id string) error { return err } + spp := m.Annotations[annotations.SeccompProfilePath] + kubeAnnotations := make(map[string]string) if err = json.Unmarshal([]byte(m.Annotations[annotations.Annotations]), &kubeAnnotations); err != nil { return err @@ -337,6 +341,7 @@ } sb.AddHostnamePath(m.Annotations[annotations.HostnamePath]) sb.AddIP(ip) + sb.SetSeccompProfilePath(spp) // We add a netNS only if we can load a permanent one. // Otherwise, the sandbox will live in the host namespace. @@ -388,6 +393,7 @@ if err != nil { return err } + scontainer.SetSpec(&m) scontainer.SetMountPoint(m.Annotations[annotations.MountPoint]) if m.Annotations[annotations.Volumes] != "" { @@ -511,7 +517,10 @@ func (c *ContainerServer) LoadContainer(id string) error { if err != nil { return err } + ctr.SetSpec(&m) ctr.SetMountPoint(m.Annotations[annotations.MountPoint]) + spp := m.Annotations[annotations.SeccompProfilePath] + ctr.SetSeccompProfilePath(spp) c.ContainerStateFromDisk(ctr) @@ -608,68 +617,53 @@ func (c *ContainerServer) Shutdown() error { type containerServerState struct { containers oci.ContainerStorer infraContainers oci.ContainerStorer - sandboxes map[string]*sandbox.Sandbox + sandboxes sandbox.Storer + // processLevels tracks how many sandboxes share each SELinux MCS level; + // the label may only be released when a level's count reaches 0 + processLevels map[string]int } // AddContainer adds a container to the container state store func (c *ContainerServer) AddContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() - sandbox := c.state.sandboxes[ctr.Sandbox()] + sandbox := c.state.sandboxes.Get(ctr.Sandbox()) sandbox.AddContainer(ctr) c.state.containers.Add(ctr.ID(), ctr) } // AddInfraContainer adds a container to the container state store func (c *ContainerServer) AddInfraContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() c.state.infraContainers.Add(ctr.ID(), ctr) } // GetContainer returns a container by its ID func (c *ContainerServer) GetContainer(id string) *oci.Container { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.containers.Get(id) } // GetInfraContainer returns a container by its ID func (c *ContainerServer) GetInfraContainer(id string) *oci.Container { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.infraContainers.Get(id) } // HasContainer checks if a container exists in the state func (c *ContainerServer) HasContainer(id string) bool { - c.stateLock.Lock() - defer c.stateLock.Unlock() - ctr := c.state.containers.Get(id) - return ctr != nil + return c.state.containers.Get(id) != nil } // RemoveContainer removes a container from the container state store func (c *ContainerServer) RemoveContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() sbID := ctr.Sandbox() - sb := c.state.sandboxes[sbID] + sb := c.state.sandboxes.Get(sbID) sb.RemoveContainer(ctr) c.state.containers.Delete(ctr.ID()) } // RemoveInfraContainer removes a container from the container state store func (c *ContainerServer) RemoveInfraContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() c.state.infraContainers.Delete(ctr.ID()) } // listContainers returns a list of all containers stored by the server state func (c *ContainerServer) listContainers() []*oci.Container { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.containers.List() } @@ -693,54 +687,52 @@ func (c *ContainerServer) ListContainers(filters ...func(*oci.Container) bool) ( // AddSandbox adds a sandbox to the sandbox state store func (c *ContainerServer) AddSandbox(sb *sandbox.Sandbox) { + c.state.sandboxes.Add(sb.ID(), sb) + c.stateLock.Lock() - defer c.stateLock.Unlock() - c.state.sandboxes[sb.ID()] = sb + c.state.processLevels[selinux.NewContext(sb.ProcessLabel())["level"]]++ + c.stateLock.Unlock() } // GetSandbox returns a sandbox by its ID func (c *ContainerServer) GetSandbox(id string) *sandbox.Sandbox { - c.stateLock.Lock() - defer c.stateLock.Unlock() - return c.state.sandboxes[id] + return c.state.sandboxes.Get(id) } // GetSandboxContainer returns a sandbox's infra container func (c *ContainerServer) GetSandboxContainer(id string) *oci.Container { - c.stateLock.Lock() - defer c.stateLock.Unlock() - sb, ok := c.state.sandboxes[id] - if !ok { - return nil - } + sb := c.state.sandboxes.Get(id) return sb.InfraContainer() } // HasSandbox checks if a sandbox exists in the state func (c *ContainerServer) HasSandbox(id string) bool { - c.stateLock.Lock() - defer c.stateLock.Unlock() - _, ok := c.state.sandboxes[id] - return ok + return c.state.sandboxes.Get(id) != nil } // RemoveSandbox removes a sandbox from the state store func (c *ContainerServer) RemoveSandbox(id string) { + sb := c.state.sandboxes.Get(id) + processLabel := sb.ProcessLabel() + level := selinux.NewContext(processLabel)["level"] + c.stateLock.Lock() - defer c.stateLock.Unlock() - delete(c.state.sandboxes, id) + pl, ok := c.state.processLevels[level] + if ok { + c.state.processLevels[level] = pl - 1 + if c.state.processLevels[level] == 0 { + label.ReleaseLabel(processLabel) + delete(c.state.processLevels, level) + } + } + c.stateLock.Unlock() + + c.state.sandboxes.Delete(id) } // ListSandboxes lists all sandboxes in the state store func (c *ContainerServer) ListSandboxes() []*sandbox.Sandbox { - c.stateLock.Lock() - defer c.stateLock.Unlock() - sbArray := make([]*sandbox.Sandbox, 0, len(c.state.sandboxes)) - for _, sb := range c.state.sandboxes { - sbArray = append(sbArray, sb) - } - - return sbArray + return c.state.sandboxes.List() } // LibcontainerStats gets the stats for the container with the given id from runc/libcontainer
diff --git a/libkpod/hooks.go b/lib/hooks.go similarity index 98% rename from libkpod/hooks.go rename to lib/hooks.go index f353cdcd..fab563f0 100644 --- a/libkpod/hooks.go +++ b/lib/hooks.go @@ -1,4 +1,4 @@ -package libkpod +package lib import ( "encoding/json" @@ -27,6 +27,7 @@ type HookParams struct { Cmds []string `json:"cmd"` Annotations []string `json:"annotation"` HasBindMounts bool `json:"hasbindmounts"` + Arguments []string `json:"arguments"` } // readHook reads hooks json files, verifies it and returns the json config
diff --git a/libkpod/kill.go b/lib/kill.go similarity index 98% rename from libkpod/kill.go rename to lib/kill.go index b2c3219a..356932f1 100644 --- a/libkpod/kill.go +++ b/lib/kill.go @@ -1,4 +1,4 @@ -package libkpod +package lib import ( "github.com/docker/docker/pkg/signal"
diff --git a/libkpod/logs.go b/lib/logs.go similarity index 99% rename from libkpod/logs.go rename to lib/logs.go index 00b0f016..d287b153 100644 --- a/libkpod/logs.go +++ b/lib/logs.go @@ -1,4 +1,4 @@ -package libkpod +package lib import ( "path"
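The `processLevels` map threaded through `AddSandbox` and `RemoveSandbox` above is a per-level reference count: several sandboxes may share one SELinux MCS level, and `label.ReleaseLabel` must only run once the last sandbox using that level is removed. Here is a distilled, self-contained sketch of just that bookkeeping; the names are hypothetical and the SELinux calls themselves are left out:

```go
package main

import (
	"fmt"
	"sync"
)

// levelTracker is a hypothetical, pared-down version of the processLevels
// bookkeeping: a reference count per SELinux MCS level.
type levelTracker struct {
	mu     sync.Mutex
	counts map[string]int
}

func newLevelTracker() *levelTracker {
	return &levelTracker{counts: make(map[string]int)}
}

// acquire records one more sandbox using the given MCS level.
func (t *levelTracker) acquire(level string) {
	t.mu.Lock()
	t.counts[level]++
	t.mu.Unlock()
}

// release drops one user of the level and reports whether it is now unused,
// i.e. whether the caller should release the SELinux label.
func (t *levelTracker) release(level string) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	n, ok := t.counts[level]
	if !ok {
		return false
	}
	if n <= 1 {
		delete(t.counts, level)
		return true
	}
	t.counts[level] = n - 1
	return false
}

func main() {
	t := newLevelTracker()
	t.acquire("s0:c1,c2") // two sandboxes share one level
	t.acquire("s0:c1,c2")
	fmt.Println(t.release("s0:c1,c2")) // false: level still in use
	fmt.Println(t.release("s0:c1,c2")) // true: safe to release the label
}
```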
diff --git a/libkpod/pause.go b/lib/pause.go similarity index 98% rename from libkpod/pause.go rename to lib/pause.go index 29871d32..70087a3a 100644 --- a/libkpod/pause.go +++ b/lib/pause.go @@ -1,4 +1,4 @@ -package libkpod +package lib import ( "github.com/kubernetes-incubator/cri-o/oci"
diff --git a/libkpod/remove.go b/lib/remove.go similarity index 87% rename from libkpod/remove.go rename to lib/remove.go index a3aa6eea..e020637f 100644 --- a/libkpod/remove.go +++ b/lib/remove.go @@ -1,4 +1,4 @@ -package libkpod +package lib import ( "os" @@ -6,10 +6,11 @@ import ( "github.com/kubernetes-incubator/cri-o/oci" "github.com/pkg/errors" + "golang.org/x/net/context" ) // Remove removes a container -func (c *ContainerServer) Remove(container string, force bool) (string, error) { +func (c *ContainerServer) Remove(ctx context.Context, container string, force bool) (string, error) { ctr, err := c.LookupContainer(container) if err != nil { return "", err @@ -22,7 +23,7 @@ func (c *ContainerServer) Remove(container string, force bool) (string, error) { return "", errors.Errorf("cannot remove paused container %s", ctrID) case oci.ContainerStateCreated, oci.ContainerStateRunning: if force { - _, err = c.ContainerStop(container, -1) + _, err = c.ContainerStop(ctx, container, 10) if err != nil { return "", errors.Wrapf(err, "unable to stop container %s", ctrID) }
diff --git a/libkpod/rename.go b/lib/rename.go similarity index 99% rename from libkpod/rename.go rename to lib/rename.go index 7c0279bf..d03c3b13 100644 --- a/libkpod/rename.go +++ b/lib/rename.go @@ -1,4 +1,4 @@ -package libkpod +package lib import ( "encoding/json"
diff --git a/lib/sandbox/history.go b/lib/sandbox/history.go new file mode 100644 index 00000000..84d0291d --- /dev/null +++ b/lib/sandbox/history.go @@ -0,0 +1,31 @@ +package sandbox + +import "sort" + +// History is a convenience type for storing a list of sandboxes, +// sorted by creation date in descending order. +type History []*Sandbox + +// Len returns the number of sandboxes in the history. +func (history *History) Len() int { + return len(*history) +} + +// Less compares two sandboxes and returns true if the second one +// was created before the first one. +func (history *History) Less(i, j int) bool { + sandboxes := *history + // FIXME: state access should be serialized + return sandboxes[j].created.Before(sandboxes[i].created) +} + +// Swap switches sandboxes i and j positions in the history. +func (history *History) Swap(i, j int) { + sandboxes := *history + sandboxes[i], sandboxes[j] = sandboxes[j], sandboxes[i] +} + +// sort orders the history by creation date in descending order. +func (history *History) sort() { + sort.Sort(history) +}
diff --git a/lib/sandbox/memory_store.go b/lib/sandbox/memory_store.go new file mode 100644 index 00000000..17533bf7 --- /dev/null +++ b/lib/sandbox/memory_store.go @@ -0,0 +1,93 @@ +package sandbox + +import "sync" + +// memoryStore implements the Storer interface in memory. +type memoryStore struct { + s map[string]*Sandbox + sync.RWMutex +} + +// NewMemoryStore initializes a new memory store. +func NewMemoryStore() Storer { + return &memoryStore{ + s: make(map[string]*Sandbox), + } +} + +// Add appends a new sandbox to the memory store. +// It overwrites any existing sandbox stored under the same id. +func (c *memoryStore) Add(id string, cont *Sandbox) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +// Get returns a sandbox from the store by id. +func (c *memoryStore) Get(id string) *Sandbox { + var res *Sandbox + c.RLock() + res = c.s[id] + c.RUnlock() + return res +} + +// Delete removes a sandbox from the store by id. +func (c *memoryStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +// List returns a sorted list of sandboxes from the store. +// The sandboxes are ordered by creation date. +func (c *memoryStore) List() []*Sandbox { + sandboxes := History(c.all()) + sandboxes.sort() + return sandboxes +} + +// Size returns the number of sandboxes in the store. +func (c *memoryStore) Size() int { + c.RLock() + defer c.RUnlock() + return len(c.s) +} + +// First returns the first sandbox found in the store by a given filter. +func (c *memoryStore) First(filter StoreFilter) *Sandbox { + for _, cont := range c.all() { + if filter(cont) { + return cont + } + } + return nil +} + +// ApplyAll calls the reducer function with every sandbox in the store. +// The reducer runs concurrently, one goroutine per sandbox; ApplyAll returns once all complete. +// NOTE: Modifications to the store MUST NOT be done by the StoreReducer. +func (c *memoryStore) ApplyAll(apply StoreReducer) { + wg := new(sync.WaitGroup) + for _, cont := range c.all() { + wg.Add(1) + go func(sandbox *Sandbox) { + apply(sandbox) + wg.Done() + }(cont) + } + + wg.Wait() +} + +func (c *memoryStore) all() []*Sandbox { + c.RLock() + sandboxes := make([]*Sandbox, 0, len(c.s)) + for _, cont := range c.s { + sandboxes = append(sandboxes, cont) + } + c.RUnlock() + return sandboxes +} + +var _ Storer = &memoryStore{}
diff --git a/libkpod/sandbox/sandbox.go b/lib/sandbox/sandbox.go similarity index 96% rename from libkpod/sandbox/sandbox.go rename to lib/sandbox/sandbox.go index d7d6569d..7624b072 100644 --- a/libkpod/sandbox/sandbox.go +++ b/lib/sandbox/sandbox.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "sync" + "time" "github.com/containernetworking/plugins/pkg/ns" "github.com/docker/docker/pkg/mount" @@ -156,7 +157,9 @@ type Sandbox struct { portMappings []*hostport.PortMapping stopped bool // ipv4 or ipv6 cache - ip string + ip string + seccompProfilePath string + created time.Time } const ( @@ -201,10 +204,21 @@ func New(id, namespace, name, kubeName, logDir string, labels, annotations map[s sb.resolvPath = resolvPath sb.hostname = hostname sb.portMappings = portMappings + sb.created = time.Now() return sb, nil } +// SetSeccompProfilePath sets the seccomp profile path +func (s *Sandbox) SetSeccompProfilePath(pp string) { + s.seccompProfilePath = pp +} + +// SeccompProfilePath returns the seccomp profile path +func (s *Sandbox) SeccompProfilePath() string { + return s.seccompProfilePath +} + // AddIP stores the ip in the sandbox func (s *Sandbox) AddIP(ip string) { s.ip = ip
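The new `memoryStore` above (and the `Storer` interface it satisfies, which follows below) uses a Docker-derived pattern: an `RWMutex`-guarded map whose iteration helpers (`List`, `First`, `ApplyAll`) work on a snapshot taken under the read lock, so user callbacks never run while the lock is held. A minimal re-implementation of just that pattern, independent of the CRI-O types, as a sketch:

```go
package main

import (
	"fmt"
	"sync"
)

// item stands in for *sandbox.Sandbox; the point here is the locking
// discipline, not the element type.
type item struct{ id string }

type store struct {
	mu sync.RWMutex
	m  map[string]*item
}

// all snapshots the map under the read lock so that callers can iterate,
// filter, or fan out goroutines without holding the lock.
func (s *store) all() []*item {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make([]*item, 0, len(s.m))
	for _, it := range s.m {
		out = append(out, it)
	}
	return out
}

// first mirrors Storer.First: a linear scan over a snapshot.
func (s *store) first(match func(*item) bool) *item {
	for _, it := range s.all() {
		if match(it) {
			return it
		}
	}
	return nil
}

func main() {
	s := &store{m: map[string]*item{"a": {id: "a"}, "b": {id: "b"}}}
	if it := s.first(func(it *item) bool { return it.id == "b" }); it != nil {
		fmt.Println("found", it.id)
	}
}
```

The snapshot-then-iterate design trades a copy of the slice header per call for the guarantee that a slow filter or reducer can never block writers.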
diff --git a/lib/sandbox/store.go b/lib/sandbox/store.go new file mode 100644 index 00000000..83d705cd --- /dev/null +++ b/lib/sandbox/store.go @@ -0,0 +1,27 @@ +package sandbox + +// StoreFilter defines a function to filter +// sandboxes in the store. +type StoreFilter func(*Sandbox) bool + +// StoreReducer defines a function to +// manipulate sandboxes in the store +type StoreReducer func(*Sandbox) + +// Storer defines an interface that any sandbox store must implement. +type Storer interface { + // Add appends a new sandbox to the store. + Add(string, *Sandbox) + // Get returns a sandbox from the store by the identifier it was stored with. + Get(string) *Sandbox + // Delete removes a sandbox from the store by the identifier it was stored with. + Delete(string) + // List returns a list of sandboxes from the store. + List() []*Sandbox + // Size returns the number of sandboxes in the store. + Size() int + // First returns the first sandbox found in the store by a given filter. + First(StoreFilter) *Sandbox + // ApplyAll calls the reducer function with every sandbox in the store. + ApplyAll(StoreReducer) +}
diff --git a/libkpod/stats.go b/lib/stats.go similarity index 99% rename from libkpod/stats.go rename to lib/stats.go index f4d645d6..229d8409 100644 --- a/libkpod/stats.go +++ b/lib/stats.go @@ -1,4 +1,4 @@ -package libkpod +package lib import ( "path/filepath"
diff --git a/libkpod/stop.go b/lib/stop.go similarity index 78% rename from libkpod/stop.go rename to lib/stop.go index 06712d45..7dbbd066 100644 --- a/libkpod/stop.go +++ b/lib/stop.go @@ -1,12 +1,13 @@ -package libkpod +package lib import ( "github.com/kubernetes-incubator/cri-o/oci" "github.com/pkg/errors" + "golang.org/x/net/context" ) // ContainerStop stops a running container with a grace period (i.e., timeout). -func (c *ContainerServer) ContainerStop(container string, timeout int64) (string, error) { +func (c *ContainerServer) ContainerStop(ctx context.Context, container string, timeout int64) (string, error) { ctr, err := c.LookupContainer(container) if err != nil { return "", errors.Wrapf(err, "failed to find container %s", container) @@ -20,7 +21,7 @@ return "", errors.Errorf("cannot stop paused container %s", ctrID) default: if cStatus.Status != oci.ContainerStateStopped { - if err := c.runtime.StopContainer(ctr, timeout); err != nil { + if err := c.runtime.StopContainer(ctx, ctr, timeout); err != nil { return "", errors.Wrapf(err, "failed to stop container %s", ctrID) } if err := c.storageRuntimeServer.StopContainer(ctrID); err != nil {
diff --git a/libkpod/testdata/config.toml b/lib/testdata/config.toml similarity index 100% rename from libkpod/testdata/config.toml rename to lib/testdata/config.toml
diff --git a/libkpod/wait.go b/lib/wait.go similarity index 98% rename from libkpod/wait.go rename to lib/wait.go index c7ba5732..c7b84c04 100644 --- a/libkpod/wait.go +++ b/lib/wait.go @@ -1,4 +1,4 @@ -package libkpod +package lib import ( "github.com/kubernetes-incubator/cri-o/oci"
diff --git a/libkpod/container_data.go b/libkpod/container_data.go deleted file mode 100644 index 2ade63ba..00000000 --- a/libkpod/container_data.go +++ /dev/null @@ -1,210 +0,0 @@ -package libkpod - -import ( - "encoding/json" - "os" - "time" - - "k8s.io/apimachinery/pkg/fields" - pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" - - "github.com/kubernetes-incubator/cri-o/libpod/driver" - "github.com/kubernetes-incubator/cri-o/libpod/images" - "github.com/kubernetes-incubator/cri-o/oci" - "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -// ContainerData handles the data used when inspecting a container -type ContainerData struct { - ID string - Name string - LogPath string - Labels fields.Set - Annotations fields.Set - State *ContainerState - Metadata *pb.ContainerMetadata - BundlePath string - StopSignal string - FromImage string `json:"Image,omitempty"` - FromImageID string `json:"ImageID"` - MountPoint string `json:"Mountpoint,omitempty"` - MountLabel string - Mounts []specs.Mount - AppArmorProfile string - ImageAnnotations map[string]string `json:"Annotations,omitempty"` - ImageCreatedBy string `json:"CreatedBy,omitempty"` - Config v1.ImageConfig
`json:"Config,omitempty"` - SizeRw uint `json:"SizeRw,omitempty"` - SizeRootFs uint `json:"SizeRootFs,omitempty"` - Args []string - ResolvConfPath string - HostnamePath string - HostsPath string - GraphDriver driverData -} - -type driverData struct { - Name string - Data map[string]string -} - -// ContainerState represents the status of a container. -type ContainerState struct { - specs.State - Created time.Time `json:"created"` - Started time.Time `json:"started,omitempty"` - Finished time.Time `json:"finished,omitempty"` - ExitCode int32 `json:"exitCode"` - OOMKilled bool `json:"oomKilled,omitempty"` - Error string `json:"error,omitempty"` -} - -// GetContainerData gets the ContainerData for a container with the given name in the given store. -// If size is set to true, it will also determine the size of the container -func (c *ContainerServer) GetContainerData(name string, size bool) (*ContainerData, error) { - ctr, err := c.inspectContainer(name) - if err != nil { - return nil, errors.Wrapf(err, "error reading build container %q", name) - } - container, err := c.store.Container(name) - if err != nil { - return nil, errors.Wrapf(err, "error reading container data") - } - - // The runtime configuration won't exist if the container has never been started by cri-o or kpod, - // so treat a not-exist error as non-fatal. - m := getBlankSpec() - config, err := c.store.FromContainerDirectory(ctr.ID(), "config.json") - if err != nil && !os.IsNotExist(errors.Cause(err)) { - return nil, err - } - if len(config) > 0 { - if err = json.Unmarshal(config, &m); err != nil { - return nil, err - } - } - - if container.ImageID == "" { - return nil, errors.Errorf("error reading container image data: container is not based on an image") - } - imageData, err := images.GetData(c.store, container.ImageID) - if err != nil { - return nil, errors.Wrapf(err, "error reading container image data") - } - - driverName, err := driver.GetDriverName(c.store) - if err != nil { - return nil, err - } - topLayer, err := c.GetContainerTopLayerID(ctr.ID()) - if err != nil { - return nil, err - } - layer, err := c.store.Layer(topLayer) - if err != nil { - return nil, err - } - driverMetadata, err := driver.GetDriverMetadata(c.store, topLayer) - if err != nil { - return nil, err - } - imageName := "" - if len(imageData.Tags) > 0 { - imageName = imageData.Tags[0] - } else if len(imageData.Digests) > 0 { - imageName = imageData.Digests[0] - } - data := &ContainerData{ - ID: ctr.ID(), - Name: ctr.Name(), - LogPath: ctr.LogPath(), - Labels: ctr.Labels(), - Annotations: ctr.Annotations(), - State: c.State(ctr), - Metadata: ctr.Metadata(), - BundlePath: ctr.BundlePath(), - StopSignal: ctr.GetStopSignal(), - Args: m.Process.Args, - FromImage: imageName, - FromImageID: container.ImageID, - MountPoint: layer.MountPoint, - ImageAnnotations: imageData.Annotations, - ImageCreatedBy: imageData.CreatedBy, - Config: imageData.Config, - GraphDriver: driverData{ - Name: driverName, - Data: driverMetadata, - }, - MountLabel: m.Linux.MountLabel, - Mounts: m.Mounts, - AppArmorProfile: m.Process.ApparmorProfile, - ResolvConfPath: "", - HostnamePath: "", - HostsPath: "", - } - - if size { - sizeRootFs, err := c.GetContainerRootFsSize(data.ID) - if err != nil { - - return nil, errors.Wrapf(err, "error reading size for container %q", name) - } - data.SizeRootFs = uint(sizeRootFs) - sizeRw, err := c.GetContainerRwSize(data.ID) - if err != nil { - return nil, errors.Wrapf(err, "error reading RWSize for container %q", name) - } - data.SizeRw = uint(sizeRw) 
- } - - return data, nil -} - -// Get an oci.Container and update its status -func (c *ContainerServer) inspectContainer(container string) (*oci.Container, error) { - ociCtr, err := c.LookupContainer(container) - if err != nil { - return nil, err - } - // call runtime.UpdateStatus() - err = c.Runtime().UpdateStatus(ociCtr) - if err != nil { - return nil, err - } - return ociCtr, nil -} - -func getBlankSpec() specs.Spec { - return specs.Spec{ - Process: &specs.Process{}, - Root: &specs.Root{}, - Mounts: []specs.Mount{}, - Hooks: &specs.Hooks{}, - Annotations: make(map[string]string), - Linux: &specs.Linux{}, - Solaris: &specs.Solaris{}, - Windows: &specs.Windows{}, - } -} - -// State copies the crio container state to ContainerState type for kpod -func (c *ContainerServer) State(ctr *oci.Container) *ContainerState { - crioState := ctr.State() - specState := specs.State{ - Version: crioState.Version, - ID: crioState.ID, - Status: crioState.Status, - Pid: crioState.Pid, - Bundle: crioState.Bundle, - Annotations: crioState.Annotations, - } - cState := &ContainerState{ - Started: crioState.Started, - Created: crioState.Created, - Finished: crioState.Finished, - } - cState.State = specState - return cState -} diff --git a/libpod/common/common.go b/libpod/common/common.go deleted file mode 100644 index 332d4c9c..00000000 --- a/libpod/common/common.go +++ /dev/null @@ -1,99 +0,0 @@ -package common - -import ( - "io" - "strings" - "syscall" - - cp "github.com/containers/image/copy" - "github.com/containers/image/signature" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -var ( - // ErrNoPassword is returned if the user did not supply a password - ErrNoPassword = errors.Wrapf(syscall.EINVAL, "password was not supplied") -) - -// GetCopyOptions constructs a new containers/image/copy.Options{} struct from the given parameters -func GetCopyOptions(reportWriter io.Writer, signaturePolicyPath string, srcDockerRegistry, destDockerRegistry *DockerRegistryOptions, signing SigningOptions) *cp.Options { - if srcDockerRegistry == nil { - srcDockerRegistry = &DockerRegistryOptions{} - } - if destDockerRegistry == nil { - destDockerRegistry = &DockerRegistryOptions{} - } - srcContext := srcDockerRegistry.GetSystemContext(signaturePolicyPath) - destContext := destDockerRegistry.GetSystemContext(signaturePolicyPath) - return &cp.Options{ - RemoveSignatures: signing.RemoveSignatures, - SignBy: signing.SignBy, - ReportWriter: reportWriter, - SourceCtx: srcContext, - DestinationCtx: destContext, - } -} - -// GetSystemContext Constructs a new containers/image/types.SystemContext{} struct from the given signaturePolicy path -func GetSystemContext(signaturePolicyPath, authFilePath string) *types.SystemContext { - sc := &types.SystemContext{} - if signaturePolicyPath != "" { - sc.SignaturePolicyPath = signaturePolicyPath - } - sc.AuthFilePath = authFilePath - return sc -} - -// CopyStringStringMap deep copies a map[string]string and returns the result -func CopyStringStringMap(m map[string]string) map[string]string { - n := map[string]string{} - for k, v := range m { - n[k] = v - } - return n -} - -// IsTrue determines whether the given string equals "true" -func IsTrue(str string) bool { - return str == "true" -} - -// IsFalse determines whether the given string equals "false" -func IsFalse(str string) bool { - return str == "false" -} - -// IsValidBool determines whether the given string equals "true" or "false" -func IsValidBool(str string) bool { - return IsTrue(str) || IsFalse(str) -} - -// 
GetPolicyContext creates a signature policy context for the given signature policy path -func GetPolicyContext(path string) (*signature.PolicyContext, error) { - policy, err := signature.DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path}) - if err != nil { - return nil, err - } - return signature.NewPolicyContext(policy) -} - -// ParseRegistryCreds takes a credentials string in the form USERNAME:PASSWORD -// and returns a DockerAuthConfig -func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) { - if creds == "" { - return nil, errors.New("no credentials supplied") - } - if !strings.Contains(creds, ":") { - return &types.DockerAuthConfig{ - Username: creds, - Password: "", - }, ErrNoPassword - } - v := strings.SplitN(creds, ":", 2) - cfg := &types.DockerAuthConfig{ - Username: v[0], - Password: v[1], - } - return cfg, nil -} diff --git a/libpod/common/docker_registry_options.go b/libpod/common/docker_registry_options.go deleted file mode 100644 index fdbaa059..00000000 --- a/libpod/common/docker_registry_options.go +++ /dev/null @@ -1,33 +0,0 @@ -package common - -import "github.com/containers/image/types" - -// DockerRegistryOptions encapsulates settings that affect how we connect or -// authenticate to a remote registry. -type DockerRegistryOptions struct { - // DockerRegistryCreds is the user name and password to supply in case - // we need to pull an image from a registry, and it requires us to - // authenticate. - DockerRegistryCreds *types.DockerAuthConfig - // DockerCertPath is the location of a directory containing CA - // certificates which will be used to verify the registry's certificate - // (all files with names ending in ".crt"), and possibly client - // certificates and private keys (pairs of files with the same name, - // except for ".cert" and ".key" suffixes). - DockerCertPath string - // DockerInsecureSkipTLSVerify turns off verification of TLS - // certificates and allows connecting to registries without encryption. - DockerInsecureSkipTLSVerify bool -} - -// GetSystemContext constructs a new system context from the given signaturePolicy path and the -// values in the DockerRegistryOptions -func (o DockerRegistryOptions) GetSystemContext(signaturePolicyPath string) *types.SystemContext { - sc := &types.SystemContext{ - SignaturePolicyPath: signaturePolicyPath, - DockerAuthConfig: o.DockerRegistryCreds, - DockerCertPath: o.DockerCertPath, - DockerInsecureSkipTLSVerify: o.DockerInsecureSkipTLSVerify, - } - return sc -} diff --git a/libpod/common/output_interfaces.go b/libpod/common/output_interfaces.go deleted file mode 100644 index 805d0c79..00000000 --- a/libpod/common/output_interfaces.go +++ /dev/null @@ -1 +0,0 @@ -package common diff --git a/libpod/common/signing_options.go b/libpod/common/signing_options.go deleted file mode 100644 index b7e14be8..00000000 --- a/libpod/common/signing_options.go +++ /dev/null @@ -1,10 +0,0 @@ -package common - -// SigningOptions encapsulates settings that control whether or not we strip or -// add signatures to images when writing them. -type SigningOptions struct { - // RemoveSignatures directs us to remove any signatures which are already present. - RemoveSignatures bool - // SignBy is a key identifier of some kind, indicating that a signature should be generated using the specified private key and stored with the image. 
- SignBy string -} diff --git a/libpod/container.go b/libpod/container.go deleted file mode 100644 index 45ee5647..00000000 --- a/libpod/container.go +++ /dev/null @@ -1,129 +0,0 @@ -package libpod - -import ( - "sync" - - "github.com/containers/storage" - "github.com/docker/docker/pkg/stringid" - spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/ulule/deepcopier" -) - -// Container is a single OCI container -type Container struct { - id string - name string - - spec *spec.Spec - pod *Pod - - valid bool - lock sync.RWMutex -} - -// ID returns the container's ID -func (c *Container) ID() string { - // No locking needed, ID will never mutate after a container is created - return c.id -} - -// Name returns the container's name -func (c *Container) Name() string { - // Name can potentially be changed while a container is running - // So lock access to it - c.lock.RLock() - defer c.lock.RUnlock() - - return c.name -} - -// Spec returns the container's OCI runtime spec -func (c *Container) Spec() *spec.Spec { - // The spec can potentially be altered when storage is configured and to - // add annotations at container create time - // As such, access to it is locked - c.lock.RLock() - defer c.lock.RUnlock() - - spec := new(spec.Spec) - deepcopier.Copy(c.spec).To(spec) - - return spec -} - -// Make a new container -func newContainer(rspec *spec.Spec) (*Container, error) { - if rspec == nil { - return nil, errors.Wrapf(ErrInvalidArg, "must provide a valid runtime spec to create container") - } - - ctr := new(Container) - ctr.id = stringid.GenerateNonCryptoID() - ctr.name = ctr.id // TODO generate unique human-readable names - - ctr.spec = new(spec.Spec) - deepcopier.Copy(rspec).To(ctr.spec) - - return ctr, nil -} - -// Create creates a container in the OCI runtime -func (c *Container) Create() error { - return ErrNotImplemented -} - -// Start starts a container -func (c *Container) Start() error { - return ErrNotImplemented -} - -// Stop stops a container -func (c *Container) Stop() error { - return ErrNotImplemented -} - -// Kill sends a signal to a container -func (c *Container) Kill(signal uint) error { - return ErrNotImplemented -} - -// Exec starts a new process inside the container -// Returns fully qualified URL of streaming server for executed process -func (c *Container) Exec(cmd []string, tty bool, stdin bool) (string, error) { - return "", ErrNotImplemented -} - -// Attach attaches to a container -// Returns fully qualified URL of streaming server for the container -func (c *Container) Attach(stdin, tty bool) (string, error) { - return "", ErrNotImplemented -} - -// Mount mounts a container's filesystem on the host -// The path where the container has been mounted is returned -func (c *Container) Mount() (string, error) { - return "", ErrNotImplemented -} - -// Status gets a container's status -// TODO this should return relevant information about container state -func (c *Container) Status() error { - return ErrNotImplemented -} - -// Export exports a container's root filesystem as a tar archive -// The archive will be saved as a file at the given path -func (c *Container) Export(path string) error { - return ErrNotImplemented -} - -// Commit commits the changes between a container and its image, creating a new -// image -// If the container was not created from an image (for example, -// WithRootFSFromPath will create a container from a directory on the system), -// a new base image will be created from the contents of the container's -// 
filesystem -func (c *Container) Commit() (*storage.Image, error) { - return nil, ErrNotImplemented -} diff --git a/libpod/copy_data.go b/libpod/copy_data.go deleted file mode 100644 index 002b28ff..00000000 --- a/libpod/copy_data.go +++ /dev/null @@ -1,664 +0,0 @@ -package libpod - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "time" - - "github.com/containers/image/docker/reference" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/docker/docker/pkg/ioutils" - "github.com/kubernetes-incubator/cri-o/cmd/kpod/docker" - "github.com/kubernetes-incubator/cri-o/libpod/common" - "github.com/kubernetes-incubator/cri-o/libpod/driver" - digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" - ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -const ( - // Package is used to identify working containers - Package = "kpod" - containerType = Package + " 0.0.1" - stateFile = Package + ".json" - // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest, - // suitable for specifying as a value of the PreferredManifestType - // member of a CommitOptions structure. It is also the default. - OCIv1ImageManifest = v1.MediaTypeImageManifest -) - -// Data handles the data used when inspecting a container -// nolint -type Data struct { - ID string - Tags []string - Digests []string - ManifestDigest digest.Digest - Comment string - Created *time.Time - Container string - Author string - Config ociv1.ImageConfig - Architecture string - OS string - Annotations map[string]string - CreatedBy string - Size uint - VirtualSize uint - GraphDriver driver.Data - RootFS ociv1.RootFS -} - -// CopyData stores the basic data used when copying a container or image -type CopyData struct { - store storage.Store - - // Type is used to help identify a build container's metadata. It - // should not be modified. - Type string `json:"type"` - // FromImage is the name of the source image which was used to create - // the container, if one was used. It should not be modified. - FromImage string `json:"image,omitempty"` - // FromImageID is the ID of the source image which was used to create - // the container, if one was used. It should not be modified. - FromImageID string `json:"image-id"` - // Config is the source image's configuration. It should not be - // modified. - Config []byte `json:"config,omitempty"` - // Manifest is the source image's manifest. It should not be modified. - Manifest []byte `json:"manifest,omitempty"` - - // Container is the name of the build container. It should not be modified. - Container string `json:"container-name,omitempty"` - // ContainerID is the ID of the build container. It should not be modified. - ContainerID string `json:"container-id,omitempty"` - // MountPoint is the last location where the container's root - // filesystem was mounted. It should not be modified. - MountPoint string `json:"mountpoint,omitempty"` - - // ImageAnnotations is a set of key-value pairs which is stored in the - // image's manifest. - ImageAnnotations map[string]string `json:"annotations,omitempty"` - // ImageCreatedBy is a description of how this container was built. - ImageCreatedBy string `json:"created-by,omitempty"` - - // Image metadata and runtime settings, in multiple formats. 
- OCIv1 v1.Image `json:"ociv1,omitempty"` - Docker docker.V2Image `json:"docker,omitempty"` -} - -func (c *CopyData) initConfig() { - image := ociv1.Image{} - dimage := docker.V2Image{} - if len(c.Config) > 0 { - // Try to parse the image config. If we fail, try to start over from scratch - if err := json.Unmarshal(c.Config, &dimage); err == nil && dimage.DockerVersion != "" { - image, err = makeOCIv1Image(&dimage) - if err != nil { - image = ociv1.Image{} - } - } else { - if err := json.Unmarshal(c.Config, &image); err != nil { - if dimage, err = makeDockerV2S2Image(&image); err != nil { - dimage = docker.V2Image{} - } - } - } - c.OCIv1 = image - c.Docker = dimage - } else { - // Try to dig out the image configuration from the manifest - manifest := docker.V2S1Manifest{} - if err := json.Unmarshal(c.Manifest, &manifest); err == nil && manifest.SchemaVersion == 1 { - if dimage, err = makeDockerV2S1Image(manifest); err == nil { - if image, err = makeOCIv1Image(&dimage); err != nil { - image = ociv1.Image{} - } - } - } - c.OCIv1 = image - c.Docker = dimage - } - - if len(c.Manifest) > 0 { - // Attempt to recover format-specific data from the manifest - v1Manifest := ociv1.Manifest{} - if json.Unmarshal(c.Manifest, &v1Manifest) == nil { - c.ImageAnnotations = v1Manifest.Annotations - } - } - - c.fixupConfig() -} - -func (c *CopyData) fixupConfig() { - if c.Docker.Config != nil { - // Prefer image-level settings over those from the container it was built from - c.Docker.ContainerConfig = *c.Docker.Config - } - c.Docker.Config = &c.Docker.ContainerConfig - c.Docker.DockerVersion = "" - now := time.Now().UTC() - if c.Docker.Created.IsZero() { - c.Docker.Created = now - } - if c.OCIv1.Created.IsZero() { - c.OCIv1.Created = &now - } - if c.OS() == "" { - c.SetOS(runtime.GOOS) - } - if c.Architecture() == "" { - c.SetArchitecture(runtime.GOARCH) - } - if c.WorkDir() == "" { - c.SetWorkDir(string(filepath.Separator)) - } -} - -// OS returns a name of the OS on which a container built using this image -// is intended to be run. -func (c *CopyData) OS() string { - return c.OCIv1.OS -} - -// SetOS sets the name of the OS on which a container built using this image -// is intended to be run. -func (c *CopyData) SetOS(os string) { - c.OCIv1.OS = os - c.Docker.OS = os -} - -// Architecture returns a name of the architecture on which a container built -// using this image is intended to be run. -func (c *CopyData) Architecture() string { - return c.OCIv1.Architecture -} - -// SetArchitecture sets the name of the architecture on which a container built -// using this image is intended to be run. -func (c *CopyData) SetArchitecture(arch string) { - c.OCIv1.Architecture = arch - c.Docker.Architecture = arch -} - -// WorkDir returns the default working directory for running commands in a container -// built using this image. -func (c *CopyData) WorkDir() string { - return c.OCIv1.Config.WorkingDir -} - -// SetWorkDir sets the location of the default working directory for running commands -// in a container built using this image. -func (c *CopyData) SetWorkDir(there string) { - c.OCIv1.Config.WorkingDir = there - c.Docker.Config.WorkingDir = there -} - -// makeOCIv1Image builds the best OCIv1 image structure we can from the -// contents of the docker image structure.
-func makeOCIv1Image(dimage *docker.V2Image) (ociv1.Image, error) { - config := dimage.Config - if config == nil { - config = &dimage.ContainerConfig - } - dimageCreatedTime := dimage.Created.UTC() - image := ociv1.Image{ - Created: &dimageCreatedTime, - Author: dimage.Author, - Architecture: dimage.Architecture, - OS: dimage.OS, - Config: ociv1.ImageConfig{ - User: config.User, - ExposedPorts: map[string]struct{}{}, - Env: config.Env, - Entrypoint: config.Entrypoint, - Cmd: config.Cmd, - Volumes: config.Volumes, - WorkingDir: config.WorkingDir, - Labels: config.Labels, - }, - RootFS: ociv1.RootFS{ - Type: "", - DiffIDs: []digest.Digest{}, - }, - History: []ociv1.History{}, - } - for port, what := range config.ExposedPorts { - image.Config.ExposedPorts[string(port)] = what - } - RootFS := docker.V2S2RootFS{} - if dimage.RootFS != nil { - RootFS = *dimage.RootFS - } - if RootFS.Type == docker.TypeLayers { - image.RootFS.Type = docker.TypeLayers - for _, id := range RootFS.DiffIDs { - image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, digest.Digest(id.String())) - } - } - for _, history := range dimage.History { - historyCreatedTime := history.Created.UTC() - ohistory := ociv1.History{ - Created: &historyCreatedTime, - CreatedBy: history.CreatedBy, - Author: history.Author, - Comment: history.Comment, - EmptyLayer: history.EmptyLayer, - } - image.History = append(image.History, ohistory) - } - return image, nil -} - -// makeDockerV2S2Image builds the best docker image structure we can from the -// contents of the OCI image structure. -func makeDockerV2S2Image(oimage *ociv1.Image) (docker.V2Image, error) { - image := docker.V2Image{ - V1Image: docker.V1Image{Created: oimage.Created.UTC(), - Author: oimage.Author, - Architecture: oimage.Architecture, - OS: oimage.OS, - ContainerConfig: docker.Config{ - User: oimage.Config.User, - ExposedPorts: docker.PortSet{}, - Env: oimage.Config.Env, - Entrypoint: oimage.Config.Entrypoint, - Cmd: oimage.Config.Cmd, - Volumes: oimage.Config.Volumes, - WorkingDir: oimage.Config.WorkingDir, - Labels: oimage.Config.Labels, - }, - }, - RootFS: &docker.V2S2RootFS{ - Type: "", - DiffIDs: []digest.Digest{}, - }, - History: []docker.V2S2History{}, - } - for port, what := range oimage.Config.ExposedPorts { - image.ContainerConfig.ExposedPorts[docker.Port(port)] = what - } - if oimage.RootFS.Type == docker.TypeLayers { - image.RootFS.Type = docker.TypeLayers - for _, id := range oimage.RootFS.DiffIDs { - d, err := digest.Parse(id.String()) - if err != nil { - return docker.V2Image{}, err - } - image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, d) - } - } - for _, history := range oimage.History { - dhistory := docker.V2S2History{ - Created: history.Created.UTC(), - CreatedBy: history.CreatedBy, - Author: history.Author, - Comment: history.Comment, - EmptyLayer: history.EmptyLayer, - } - image.History = append(image.History, dhistory) - } - image.Config = &image.ContainerConfig - return image, nil -} - -// makeDockerV2S1Image builds the best docker image structure we can from the -// contents of the V2S1 image structure. -func makeDockerV2S1Image(manifest docker.V2S1Manifest) (docker.V2Image, error) { - // Treat the most recent (first) item in the history as a description of the image. 
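The DiffID bookkeeping in the function below, and the layer handling later in `copy_ref.go`, both lean on the go-digest API: `digest.Canonical.FromBytes` for one-shot blobs (configs, manifests) and a `Digester` fed through `io.Copy` for layer streams. A minimal sketch of the two patterns, assuming only the `github.com/opencontainers/go-digest` module:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// One-shot digest of an in-memory blob, as used for config blobs
	// and encoded manifests.
	d := digest.Canonical.FromBytes([]byte(`{"schemaVersion":2}`))
	fmt.Println(d)

	// Streaming digest: the Digester's Hash() is an io.Writer, so it can
	// sit on either side of a TeeReader to record compressed and
	// uncompressed sums of the same layer in one pass.
	dg := digest.Canonical.Digester()
	if _, err := io.Copy(dg.Hash(), strings.NewReader("layer bytes")); err != nil {
		panic(err)
	}
	fmt.Println(dg.Digest())
}
```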
- if len(manifest.History) == 0 { - return docker.V2Image{}, errors.Errorf("error parsing image configuration from manifest") - } - dimage := docker.V2Image{} - err := json.Unmarshal([]byte(manifest.History[0].V1Compatibility), &dimage) - if err != nil { - return docker.V2Image{}, err - } - if dimage.DockerVersion == "" { - return docker.V2Image{}, errors.Errorf("error parsing image configuration from history") - } - // The DiffID list is intended to contain the sums of _uncompressed_ blobs, and these are most - // likely compressed, so leave the list empty to avoid potential confusion later on. We can - // construct a list with the correct values when we prep layers for pushing, so we don't lose - // information by leaving this part undone. - rootFS := &docker.V2S2RootFS{ - Type: docker.TypeLayers, - DiffIDs: []digest.Digest{}, - } - // Build a filesystem history. - history := []docker.V2S2History{} - for i := range manifest.History { - h := docker.V2S2History{ - Created: time.Now().UTC(), - Author: "", - CreatedBy: "", - Comment: "", - EmptyLayer: false, - } - dcompat := docker.V1Compatibility{} - if err2 := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), &dcompat); err2 == nil { - h.Created = dcompat.Created.UTC() - h.Author = dcompat.Author - h.Comment = dcompat.Comment - if len(dcompat.ContainerConfig.Cmd) > 0 { - h.CreatedBy = fmt.Sprintf("%v", dcompat.ContainerConfig.Cmd) - } - h.EmptyLayer = dcompat.ThrowAway - } - // Prepend this layer to the list, because a v2s1 format manifest's list is in reverse order - // compared to v2s2, which lists earlier layers before later ones. - history = append([]docker.V2S2History{h}, history...) - } - dimage.RootFS = rootFS - dimage.History = history - return dimage, nil -} - -// Annotations gets the annotations of the container or image -func (c *CopyData) Annotations() map[string]string { - return common.CopyStringStringMap(c.ImageAnnotations) -} - -// Save the CopyData to disk -func (c *CopyData) Save() error { - buildstate, err := json.Marshal(c) - if err != nil { - return err - } - cdir, err := c.store.ContainerDirectory(c.ContainerID) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600) - -} - -// GetContainerCopyData gets the copy data for a container -func (r *Runtime) GetContainerCopyData(name string) (*CopyData, error) { - var data *CopyData - var err error - if name != "" { - data, err = openCopyData(r.store, name) - if os.IsNotExist(errors.Cause(err)) { - data, err = r.importCopyData(r.store, name, "") - } - } - if err != nil { - return nil, errors.Wrapf(err, "error reading build container") - } - if data == nil { - return nil, errors.Errorf("error finding build container") - } - return data, nil - -} - -// GetImageCopyData gets the copy data for an image -func (r *Runtime) GetImageCopyData(image string) (*CopyData, error) { - if image == "" { - return nil, errors.Errorf("image name must be specified") - } - img, err := r.GetImage(image) - if err != nil { - return nil, errors.Wrapf(err, "error locating image %q for importing settings", image) - } - - systemContext := common.GetSystemContext("", "") - data, err := r.ImportCopyDataFromImage(systemContext, img.ID, "", "") - if err != nil { - return nil, errors.Wrapf(err, "error reading image") - } - if data == nil { - return nil, errors.Errorf("error mocking up build configuration") - } - return data, nil - -} - -func (r *Runtime) importCopyData(store storage.Store, container, signaturePolicyPath string)
(*CopyData, error) { - if container == "" { - return nil, errors.Errorf("container name must be specified") - } - - c, err := store.Container(container) - if err != nil { - return nil, err - } - - systemContext := common.GetSystemContext(signaturePolicyPath, "") - - data, err := r.ImportCopyDataFromImage(systemContext, c.ImageID, container, c.ID) - if err != nil { - return nil, err - } - - if data.FromImageID != "" { - if d, err2 := digest.Parse(data.FromImageID); err2 == nil { - data.Docker.Parent = docker.ID(d) - } else { - data.Docker.Parent = docker.ID(digest.NewDigestFromHex(digest.Canonical.String(), data.FromImageID)) - } - } - if data.FromImage != "" { - data.Docker.ContainerConfig.Image = data.FromImage - } - - err = data.Save() - if err != nil { - return nil, errors.Wrapf(err, "error saving CopyData state") - } - - return data, nil -} - -func openCopyData(store storage.Store, container string) (*CopyData, error) { - cdir, err := store.ContainerDirectory(container) - if err != nil { - return nil, err - } - buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) - if err != nil { - return nil, err - } - c := &CopyData{} - err = json.Unmarshal(buildstate, &c) - if err != nil { - return nil, err - } - if c.Type != containerType { - return nil, errors.Errorf("container is not a %s container", Package) - } - c.store = store - c.fixupConfig() - return c, nil - -} - -// ImportCopyDataFromImage creates copy data for an image with the given parameters -func (r *Runtime) ImportCopyDataFromImage(systemContext *types.SystemContext, imageID, containerName, containerID string) (*CopyData, error) { - manifest := []byte{} - config := []byte{} - imageName := "" - - if imageID != "" { - ref, err := is.Transport.ParseStoreReference(r.store, "@"+imageID) - if err != nil { - return nil, errors.Wrapf(err, "no such image %q", "@"+imageID) - } - src, err2 := ref.NewImage(systemContext) - if err2 != nil { - return nil, errors.Wrapf(err2, "error instantiating image") - } - defer src.Close() - config, err = src.ConfigBlob() - if err != nil { - return nil, errors.Wrapf(err, "error reading image configuration") - } - manifest, _, err = src.Manifest() - if err != nil { - return nil, errors.Wrapf(err, "error reading image manifest") - } - if img, err3 := r.store.Image(imageID); err3 == nil { - if len(img.Names) > 0 { - imageName = img.Names[0] - } - } - } - - data := &CopyData{ - store: r.store, - Type: containerType, - FromImage: imageName, - FromImageID: imageID, - Config: config, - Manifest: manifest, - Container: containerName, - ContainerID: containerID, - ImageAnnotations: map[string]string{}, - ImageCreatedBy: "", - } - - data.initConfig() - - return data, nil - -} - -// MakeImageRef converts a CopyData struct into a types.ImageReference -func (c *CopyData) MakeImageRef(manifestType string, compress archive.Compression, names []string, layerID string, historyTimestamp *time.Time) (types.ImageReference, error) { - var name reference.Named - if len(names) > 0 { - if parsed, err := reference.ParseNamed(names[0]); err == nil { - name = parsed - } - } - if manifestType == "" { - manifestType = OCIv1ImageManifest - } - oconfig, err := json.Marshal(&c.OCIv1) - if err != nil { - return nil, errors.Wrapf(err, "error encoding OCI-format image configuration") - } - dconfig, err := json.Marshal(&c.Docker) - if err != nil { - return nil, errors.Wrapf(err, "error encoding docker-format image configuration") - } - created := time.Now().UTC() - if historyTimestamp != nil { - created = historyTimestamp.UTC() - 
} - ref := &CopyRef{ - store: c.store, - compression: compress, - name: name, - names: names, - layerID: layerID, - addHistory: false, - oconfig: oconfig, - dconfig: dconfig, - created: created, - createdBy: c.ImageCreatedBy, - annotations: c.ImageAnnotations, - preferredManifestType: manifestType, - exporting: true, - } - return ref, nil -} - -// GetData gets the Data for a container with the given name in the given store. -func (r *Runtime) GetData(name string) (*Data, error) { - img, err := r.GetImage(name) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", name) - } - - imgRef, err := r.GetImageRef("@" + img.ID) - if err != nil { - return nil, errors.Wrapf(err, "error reading image reference %q", img.ID) - } - defer imgRef.Close() - - tags, digests, err := ParseImageNames(img.Names) - if err != nil { - return nil, errors.Wrapf(err, "error parsing image names for %q", name) - } - - driverName, err := driver.GetDriverName(r.store) - if err != nil { - return nil, errors.Wrapf(err, "error reading name of storage driver") - } - - topLayerID := img.TopLayer - - driverMetadata, err := driver.GetDriverMetadata(r.store, topLayerID) - if err != nil { - return nil, errors.Wrapf(err, "error asking storage driver %q for metadata", driverName) - } - - layer, err := r.store.Layer(topLayerID) - if err != nil { - return nil, errors.Wrapf(err, "error reading information about layer %q", topLayerID) - } - size, err := r.store.DiffSize(layer.Parent, layer.ID) - if err != nil { - return nil, errors.Wrapf(err, "error determining size of layer %q", layer.ID) - } - - imgSize, err := imgRef.Size() - if err != nil { - return nil, errors.Wrapf(err, "error determining size of image %q", transports.ImageName(imgRef.Reference())) - } - - manifest, manifestType, err := imgRef.Manifest() - if err != nil { - return nil, errors.Wrapf(err, "error reading manifest for image %q", img.ID) - } - manifestDigest := digest.Digest("") - if len(manifest) > 0 { - manifestDigest = digest.Canonical.FromBytes(manifest) - } - annotations := annotations(manifest, manifestType) - - config, err := imgRef.OCIConfig() - if err != nil { - return nil, errors.Wrapf(err, "error reading image configuration for %q", img.ID) - } - historyComment := "" - historyCreatedBy := "" - if len(config.History) > 0 { - historyComment = config.History[len(config.History)-1].Comment - historyCreatedBy = config.History[len(config.History)-1].CreatedBy - } - - return &Data{ - ID: img.ID, - Tags: tags, - Digests: digests, - ManifestDigest: manifestDigest, - Comment: historyComment, - Created: config.Created, - Author: config.Author, - Config: config.Config, - Architecture: config.Architecture, - OS: config.OS, - Annotations: annotations, - CreatedBy: historyCreatedBy, - Size: uint(size), - VirtualSize: uint(size + imgSize), - GraphDriver: driver.Data{ - Name: driverName, - Data: driverMetadata, - }, - RootFS: config.RootFS, - }, nil -} diff --git a/libpod/copy_ref.go b/libpod/copy_ref.go deleted file mode 100644 index 5c70817c..00000000 --- a/libpod/copy_ref.go +++ /dev/null @@ -1,445 +0,0 @@ -package libpod - -import ( - "bytes" - "context" - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - is "github.com/containers/image/storage" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/docker/docker/pkg/ioutils" - 
"github.com/kubernetes-incubator/cri-o/cmd/kpod/docker" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// CopyRef handles image references used for copying images to/from remotes -type CopyRef struct { - store storage.Store - compression archive.Compression - name reference.Named - names []string - layerID string - addHistory bool - oconfig []byte - dconfig []byte - created time.Time - createdBy string - annotations map[string]string - preferredManifestType string - exporting bool -} - -type copySource struct { - path string - ref *CopyRef - store storage.Store - layerID string - names []string - addHistory bool - compression archive.Compression - config []byte - configDigest digest.Digest - manifest []byte - manifestType string - exporting bool -} - -// NewImage creates a new image from the given system context -func (c *CopyRef) NewImage(sc *types.SystemContext) (types.Image, error) { - src, err := c.NewImageSource(sc) - if err != nil { - return nil, err - } - return image.FromSource(src) -} - -// NewImageSource creates a new image source from the given system context and manifest -func (c *CopyRef) NewImageSource(sc *types.SystemContext) (src types.ImageSource, err error) { - // Decide which type of manifest and configuration output we're going to provide. - manifestType := c.preferredManifestType - // If it's not a format we support, return an error. - // Try to provide a manifest and configuration in the same format the current ones are in. - if manifestType != v1.MediaTypeImageManifest && manifestType != docker.V2S2MediaTypeManifest { - return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)", - manifestType, v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest) - } - // Start building the list of layers using the read-write layer. - layers := []string{} - layerID := c.layerID - layer, err := c.store.Layer(layerID) - if err != nil { - return nil, errors.Wrapf(err, "unable to read layer %q", layerID) - } - // Walk the list of parent layers, prepending each as we go. - for layer != nil { - layers = append(append([]string{}, layerID), layers...) - layerID = layer.Parent - if layerID == "" { - err = nil - break - } - layer, err = c.store.Layer(layerID) - if err != nil { - return nil, errors.Wrapf(err, "unable to read layer %q", layerID) - } - } - logrus.Debugf("layer list: %q", layers) - - // Make a temporary directory to hold blobs. - path, err := ioutil.TempDir(os.TempDir(), "kpod") - if err != nil { - return nil, err - } - logrus.Debugf("using %q to hold temporary data", path) - defer func() { - if src == nil { - err2 := os.RemoveAll(path) - if err2 != nil { - logrus.Errorf("error removing %q: %v", path, err) - } - } - }() - - // Build fresh copies of the configurations so that we don't mess with the values in the Builder - // object itself. - oimage := v1.Image{} - err = json.Unmarshal(c.oconfig, &oimage) - if err != nil { - return nil, err - } - dimage := docker.V2Image{} - err = json.Unmarshal(c.dconfig, &dimage) - if err != nil { - return nil, err - } - - // Start building manifests. 
- omanifest := v1.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: v1.Descriptor{ - MediaType: v1.MediaTypeImageConfig, - }, - Layers: []v1.Descriptor{}, - Annotations: c.annotations, - } - dmanifest := docker.V2S2Manifest{ - V2Versioned: docker.V2Versioned{ - SchemaVersion: 2, - MediaType: docker.V2S2MediaTypeManifest, - }, - Config: docker.V2S2Descriptor{ - MediaType: docker.V2S2MediaTypeImageConfig, - }, - Layers: []docker.V2S2Descriptor{}, - } - - oimage.RootFS.Type = docker.TypeLayers - oimage.RootFS.DiffIDs = []digest.Digest{} - dimage.RootFS = &docker.V2S2RootFS{} - dimage.RootFS.Type = docker.TypeLayers - dimage.RootFS.DiffIDs = []digest.Digest{} - - // Extract each layer and compute its digests, both compressed (if requested) and uncompressed. - for _, layerID := range layers { - omediaType := v1.MediaTypeImageLayer - dmediaType := docker.V2S2MediaTypeUncompressedLayer - // Figure out which media type we want to call this. Assume no compression. - if c.compression != archive.Uncompressed { - switch c.compression { - case archive.Gzip: - omediaType = v1.MediaTypeImageLayerGzip - dmediaType = docker.V2S2MediaTypeLayer - logrus.Debugf("compressing layer %q with gzip", layerID) - case archive.Bzip2: - // Until the image specs define a media type for bzip2-compressed layers, even if we know - // how to decompress them, we can't try to compress layers with bzip2. - return nil, errors.New("media type for bzip2-compressed layers is not defined") - default: - logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID) - } - } - // If we're not re-exporting the data, just fake up layer and diff IDs for the manifest. - if !c.exporting { - fakeLayerDigest := digest.NewDigestFromHex(digest.Canonical.String(), layerID) - // Add a note in the manifest about the layer. The blobs should be identified by their - // possibly-compressed blob digests, but just use the layer IDs here. - olayerDescriptor := v1.Descriptor{ - MediaType: omediaType, - Digest: fakeLayerDigest, - Size: -1, - } - omanifest.Layers = append(omanifest.Layers, olayerDescriptor) - dlayerDescriptor := docker.V2S2Descriptor{ - MediaType: dmediaType, - Digest: fakeLayerDigest, - Size: -1, - } - dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) - // Add a note about the diffID, which should be uncompressed digest of the blob, but - // just use the layer ID here. - oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, fakeLayerDigest) - dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, fakeLayerDigest) - continue - } - // Start reading the layer. - rc, err := c.store.Diff("", layerID, nil) - if err != nil { - return nil, errors.Wrapf(err, "error extracting layer %q", layerID) - } - defer rc.Close() - // Set up to decompress the layer, in case it's coming out compressed. Due to implementation - // differences, the result may not match the digest the blob had when it was originally imported, - // so we have to recompute all of this anyway if we want to be sure the digests we use will be - // correct. - uncompressed, err := archive.DecompressStream(rc) - if err != nil { - return nil, errors.Wrapf(err, "error decompressing layer %q", layerID) - } - defer uncompressed.Close() - srcHasher := digest.Canonical.Digester() - reader := io.TeeReader(uncompressed, srcHasher.Hash()) - // Set up to write the possibly-recompressed blob. 
- layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600) - if err != nil { - return nil, errors.Wrapf(err, "error opening file for layer %q", layerID) - } - destHasher := digest.Canonical.Digester() - counter := ioutils.NewWriteCounter(layerFile) - multiWriter := io.MultiWriter(counter, destHasher.Hash()) - // Compress the layer, if we're compressing it. - writer, err := archive.CompressStream(multiWriter, c.compression) - if err != nil { - return nil, errors.Wrapf(err, "error compressing layer %q", layerID) - } - size, err := io.Copy(writer, reader) - if err != nil { - return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) - } - writer.Close() - layerFile.Close() - if c.compression == archive.Uncompressed { - if size != counter.Count { - return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count) - } - } else { - size = counter.Count - } - logrus.Debugf("layer %q size is %d bytes", layerID, size) - // Rename the layer so that we can more easily find it by digest later. - err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())) - if err != nil { - return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) - } - // Add a note in the manifest about the layer. The blobs are identified by their possibly- - // compressed blob digests. - olayerDescriptor := v1.Descriptor{ - MediaType: omediaType, - Digest: destHasher.Digest(), - Size: size, - } - omanifest.Layers = append(omanifest.Layers, olayerDescriptor) - dlayerDescriptor := docker.V2S2Descriptor{ - MediaType: dmediaType, - Digest: destHasher.Digest(), - Size: size, - } - dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) - // Add a note about the diffID, which is always an uncompressed value. - oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest()) - dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest()) - } - - if c.addHistory { - // Build history notes in the image configurations. - onews := v1.History{ - Created: &c.created, - CreatedBy: c.createdBy, - Author: oimage.Author, - EmptyLayer: false, - } - oimage.History = append(oimage.History, onews) - dnews := docker.V2S2History{ - Created: c.created, - CreatedBy: c.createdBy, - Author: dimage.Author, - EmptyLayer: false, - } - dimage.History = append(dimage.History, dnews) - } - - // Encode the image configuration blob. - oconfig, err := json.Marshal(&oimage) - if err != nil { - return nil, err - } - logrus.Debugf("OCIv1 config = %s", oconfig) - - // Add the configuration blob to the manifest. - omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig) - omanifest.Config.Size = int64(len(oconfig)) - omanifest.Config.MediaType = v1.MediaTypeImageConfig - - // Encode the manifest. - omanifestbytes, err := json.Marshal(&omanifest) - if err != nil { - return nil, err - } - logrus.Debugf("OCIv1 manifest = %s", omanifestbytes) - - // Encode the image configuration blob. - dconfig, err := json.Marshal(&dimage) - if err != nil { - return nil, err - } - logrus.Debugf("Docker v2s2 config = %s", dconfig) - - // Add the configuration blob to the manifest. - dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig) - dmanifest.Config.Size = int64(len(dconfig)) - dmanifest.Config.MediaType = docker.V2S2MediaTypeImageConfig - - // Encode the manifest. 
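The manifest wiring in `NewImageSource` follows a fixed recipe: marshal the config, record its canonical digest and size in the manifest's `Config` descriptor, then marshal the manifest itself. A compact, self-contained sketch of that recipe for the OCI path, assuming only the image-spec and go-digest modules this file already imports:

```go
package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	config := []byte(`{"architecture":"amd64","os":"linux"}`)

	// The Config descriptor pins the config blob by digest and size;
	// layer descriptors would be appended to Layers the same way.
	m := v1.Manifest{
		Versioned: specs.Versioned{SchemaVersion: 2},
		Config: v1.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
			Digest:    digest.Canonical.FromBytes(config),
			Size:      int64(len(config)),
		},
		Layers: []v1.Descriptor{},
	}

	raw, err := json.Marshal(&m)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", raw)
}
```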
- dmanifestbytes, err := json.Marshal(&dmanifest) - if err != nil { - return nil, err - } - logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes) - - // Decide which manifest and configuration blobs we'll actually output. - var config []byte - var manifest []byte - switch manifestType { - case v1.MediaTypeImageManifest: - manifest = omanifestbytes - config = oconfig - case docker.V2S2MediaTypeManifest: - manifest = dmanifestbytes - config = dconfig - default: - panic("unreachable code: unsupported manifest type") - } - src = ©Source{ - path: path, - ref: c, - store: c.store, - layerID: c.layerID, - names: c.names, - addHistory: c.addHistory, - compression: c.compression, - config: config, - configDigest: digest.Canonical.FromBytes(config), - manifest: manifest, - manifestType: manifestType, - exporting: c.exporting, - } - return src, nil -} - -// NewImageDestination creates a new image destination from the given system context -func (c *CopyRef) NewImageDestination(sc *types.SystemContext) (types.ImageDestination, error) { - return nil, errors.Errorf("can't write to a container") -} - -// DockerReference gets the docker reference for the given CopyRef -func (c *CopyRef) DockerReference() reference.Named { - return c.name -} - -// StringWithinTransport returns the first name of the copyRef -func (c *CopyRef) StringWithinTransport() string { - if len(c.names) > 0 { - return c.names[0] - } - return "" -} - -// DeleteImage deletes an image in the CopyRef -func (c *CopyRef) DeleteImage(*types.SystemContext) error { - // we were never here - return nil -} - -// PolicyConfigurationIdentity returns the policy configuration for the CopyRef -func (c *CopyRef) PolicyConfigurationIdentity() string { - return "" -} - -// PolicyConfigurationNamespaces returns the policy configuration namespace for the CopyRef -func (c *CopyRef) PolicyConfigurationNamespaces() []string { - return nil -} - -// Transport returns an ImageTransport for the given CopyRef -func (c *CopyRef) Transport() types.ImageTransport { - return is.Transport -} - -func (cs *copySource) Close() error { - err := os.RemoveAll(cs.path) - if err != nil { - logrus.Errorf("error removing %q: %v", cs.path, err) - } - return err -} - -func (cs *copySource) Reference() types.ImageReference { - return cs.ref -} - -func (cs *copySource) GetSignatures(context.Context) ([][]byte, error) { - return nil, nil -} - -func (cs *copySource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return []byte{}, "", errors.Errorf("TODO") -} - -func (cs *copySource) GetManifest() ([]byte, string, error) { - return cs.manifest, cs.manifestType, nil -} - -func (cs *copySource) GetBlob(blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) { - if blob.Digest == cs.configDigest { - logrus.Debugf("start reading config") - reader := bytes.NewReader(cs.config) - closer := func() error { - logrus.Debugf("finished reading config") - return nil - } - return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil - } - layerFile, err := os.OpenFile(filepath.Join(cs.path, blob.Digest.String()), os.O_RDONLY, 0600) - if err != nil { - logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err) - return nil, -1, err - } - size = -1 - st, err := layerFile.Stat() - if err != nil { - logrus.Warnf("error reading size of layer %q: %v", blob.Digest.String(), err) - } else { - size = st.Size() - } - logrus.Debugf("reading layer %q", blob.Digest.String()) - closer := func() error { - layerFile.Close() - logrus.Debugf("finished reading 
layer %q", blob.Digest.String()) - return nil - } - return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil -} diff --git a/libpod/diff.go b/libpod/diff.go deleted file mode 100644 index 055bb7fe..00000000 --- a/libpod/diff.go +++ /dev/null @@ -1,53 +0,0 @@ -package libpod - -import ( - "github.com/containers/storage/pkg/archive" - "github.com/kubernetes-incubator/cri-o/libpod/layers" - "github.com/pkg/errors" -) - -// GetDiff returns the differences between the two images, layers, or containers -func (r *Runtime) GetDiff(from, to string) ([]archive.Change, error) { - toLayer, err := r.getLayerID(to) - if err != nil { - return nil, err - } - fromLayer := "" - if from != "" { - fromLayer, err = r.getLayerID(from) - if err != nil { - return nil, err - } - } - return r.store.Changes(fromLayer, toLayer) -} - -// GetLayerID gets a full layer id given a full or partial id -// If the id matches a container or image, the id of the top layer is returned -// If the id matches a layer, the top layer id is returned -func (r *Runtime) getLayerID(id string) (string, error) { - var toLayer string - toImage, err := r.GetImage(id) - if err != nil { - toCtr, err := r.store.Container(id) - if err != nil { - toLayer, err = layers.FullID(r.store, id) - if err != nil { - return "", errors.Errorf("layer, image, or container %s does not exist", id) - } - } else { - toLayer = toCtr.LayerID - } - } else { - toLayer = toImage.TopLayer - } - return toLayer, nil -} - -func (r *Runtime) getLayerParent(layerID string) (string, error) { - layer, err := r.store.Layer(layerID) - if err != nil { - return "", err - } - return layer.Parent, nil -} diff --git a/libpod/driver/driver.go b/libpod/driver/driver.go deleted file mode 100644 index 4db55852..00000000 --- a/libpod/driver/driver.go +++ /dev/null @@ -1,27 +0,0 @@ -package driver - -import cstorage "github.com/containers/storage" - -// Data handles the data for a storage driver -type Data struct { - Name string - Data map[string]string -} - -// GetDriverName returns the name of the driver for the given store -func GetDriverName(store cstorage.Store) (string, error) { - driver, err := store.GraphDriver() - if err != nil { - return "", err - } - return driver.String(), nil -} - -// GetDriverMetadata returns the metadata regarding the driver for the layer in the given store -func GetDriverMetadata(store cstorage.Store, layerID string) (map[string]string, error) { - driver, err := store.GraphDriver() - if err != nil { - return nil, err - } - return driver.Metadata(layerID) -} diff --git a/libpod/errors.go b/libpod/errors.go deleted file mode 100644 index d50db574..00000000 --- a/libpod/errors.go +++ /dev/null @@ -1,53 +0,0 @@ -package libpod - -import ( - "errors" -) - -var ( - // ErrNoSuchCtr indicates the requested container does not exist - ErrNoSuchCtr = errors.New("no such container") - // ErrNoSuchPod indicates the requested pod does not exist - ErrNoSuchPod = errors.New("no such pod") - // ErrNoSuchImage indicates the requested image does not exist - ErrNoSuchImage = errors.New("no such image") - - // ErrCtrExists indicates a container with the same name or ID already - // exists - ErrCtrExists = errors.New("container already exists") - // ErrPodExists indicates a pod with the same name or ID already exists - ErrPodExists = errors.New("pod already exists") - // ErrImageExists indicated an image with the same ID already exists - ErrImageExists = errors.New("image already exists") - - // ErrRuntimeFinalized indicates that the runtime has already been - // 
created and cannot be modified - ErrRuntimeFinalized = errors.New("runtime has been finalized") - // ErrCtrFinalized indicates that the container has already been created - // and cannot be modified - ErrCtrFinalized = errors.New("container has been finalized") - // ErrPodFinalized indicates that the pod has already been created and - // cannot be modified - ErrPodFinalized = errors.New("pod has been finalized") - - // ErrInvalidArg indicates that an invalid argument was passed - ErrInvalidArg = errors.New("invalid argument") - - // ErrRuntimeStopped indicates that the runtime has already been shut - // down and no further operations can be performed on it - ErrRuntimeStopped = errors.New("runtime has already been stopped") - // ErrCtrStopped indicates that the requested container is not running - // and the requested operation cannot be performed until it is started - ErrCtrStopped = errors.New("container is stopped") - - // ErrCtrRemoved indicates that the container has already been removed - // and no further operations can be performed on it - ErrCtrRemoved = errors.New("container has already been removed") - // ErrPodRemoved indicates that the pod has already been removed and no - // further operations can be performed on it - ErrPodRemoved = errors.New("pod has already been removed") - - // ErrNotImplemented indicates that the requested functionality is not - // yet present - ErrNotImplemented = errors.New("not yet implemented") -) diff --git a/libpod/images/image_data.go b/libpod/images/image_data.go deleted file mode 100644 index 12fb1e77..00000000 --- a/libpod/images/image_data.go +++ /dev/null @@ -1,203 +0,0 @@ -package images - -import ( - "encoding/json" - "time" - - "github.com/containers/image/docker/reference" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/libpod/driver" - digest "github.com/opencontainers/go-digest" - ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// Data handles the data used when inspecting a container -// nolint -type Data struct { - ID string - Tags []string - Digests []string - Digest digest.Digest - Comment string - Created *time.Time - Container string - Author string - Config ociv1.ImageConfig - Architecture string - OS string - Annotations map[string]string - CreatedBy string - Size uint - VirtualSize uint - GraphDriver driver.Data - RootFS ociv1.RootFS -} - -// ParseImageNames parses the names we've stored with an image into a list of -// tagged references and a list of references which contain digests. 
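`ParseImageNames` below relies on the reference helpers from `containers/image/docker/reference` to sort an image's stored names into tagged and digested buckets. A small sketch of those helpers in isolation; the image names are made up, and the digest is an obviously fake placeholder:

```go
package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	names := []string{
		"docker.io/library/alpine",     // name only: gets :latest via TagNameOnly
		"docker.io/library/alpine:3.6", // already tagged
		"docker.io/library/alpine@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
	}
	for _, name := range names {
		named, err := reference.ParseNamed(name)
		if err != nil {
			fmt.Println(name, "->", err)
			continue
		}
		// Digested references go in the digests bucket.
		if _, ok := named.(reference.Digested); ok {
			fmt.Println(name, "-> digest bucket")
			continue
		}
		// Bare names are normalized to an explicit :latest tag.
		if reference.IsNameOnly(named) {
			named = reference.TagNameOnly(named)
		}
		fmt.Println(name, "-> tag bucket:", named.String())
	}
}
```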
-func ParseImageNames(names []string) (tags, digests []string, err error) { - for _, name := range names { - if named, err := reference.ParseNamed(name); err == nil { - if digested, ok := named.(reference.Digested); ok { - canonical, err := reference.WithDigest(named, digested.Digest()) - if err == nil { - digests = append(digests, canonical.String()) - } - } else { - if reference.IsNameOnly(named) { - named = reference.TagNameOnly(named) - } - if tagged, ok := named.(reference.Tagged); ok { - namedTagged, err := reference.WithTag(named, tagged.Tag()) - if err == nil { - tags = append(tags, namedTagged.String()) - } - } - } - } - } - return tags, digests, nil -} - -func annotations(manifest []byte, manifestType string) map[string]string { - annotations := make(map[string]string) - switch manifestType { - case ociv1.MediaTypeImageManifest: - var m ociv1.Manifest - if err := json.Unmarshal(manifest, &m); err == nil { - for k, v := range m.Annotations { - annotations[k] = v - } - } - } - return annotations -} - -// GetData gets the Data for a container with the given name in the given store. -func GetData(store storage.Store, name string) (*Data, error) { - img, err := FindImage(store, name) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", name) - } - - imgRef, err := FindImageRef(store, "@"+img.ID) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", img.ID) - } - defer imgRef.Close() - - tags, digests, err := ParseImageNames(img.Names) - if err != nil { - return nil, errors.Wrapf(err, "error parsing image names for %q", name) - } - - driverName, err := driver.GetDriverName(store) - if err != nil { - return nil, errors.Wrapf(err, "error reading name of storage driver") - } - - topLayerID := img.TopLayer - - driverMetadata, err := driver.GetDriverMetadata(store, topLayerID) - if err != nil { - return nil, errors.Wrapf(err, "error asking storage driver %q for metadata", driverName) - } - - layer, err := store.Layer(topLayerID) - if err != nil { - return nil, errors.Wrapf(err, "error reading information about layer %q", topLayerID) - } - size, err := store.DiffSize(layer.Parent, layer.ID) - if err != nil { - return nil, errors.Wrapf(err, "error determining size of layer %q", layer.ID) - } - - imgSize, err := imgRef.Size() - if err != nil { - return nil, errors.Wrapf(err, "error determining size of image %q", transports.ImageName(imgRef.Reference())) - } - - manifest, manifestType, err := imgRef.Manifest() - if err != nil { - return nil, errors.Wrapf(err, "error reading manifest for image %q", img.ID) - } - manifestDigest := digest.Digest("") - if len(manifest) > 0 { - manifestDigest = digest.Canonical.FromBytes(manifest) - } - annotations := annotations(manifest, manifestType) - - config, err := imgRef.OCIConfig() - if err != nil { - return nil, errors.Wrapf(err, "error reading image configuration for %q", img.ID) - } - historyComment := "" - historyCreatedBy := "" - if len(config.History) > 0 { - historyComment = config.History[len(config.History)-1].Comment - historyCreatedBy = config.History[len(config.History)-1].CreatedBy - } - - return &Data{ - ID: img.ID, - Tags: tags, - Digests: digests, - Digest: manifestDigest, - Comment: historyComment, - Created: config.Created, - Author: config.Author, - Config: config.Config, - Architecture: config.Architecture, - OS: config.OS, - Annotations: annotations, - CreatedBy: historyCreatedBy, - Size: uint(size), - VirtualSize: uint(size + imgSize), - GraphDriver: driver.Data{ - Name: driverName, - Data: 
driverMetadata, - }, - RootFS: config.RootFS, - }, nil -} - -// FindImage searches for a *storage.Image matching the given name or ID in the given store. -func FindImage(store storage.Store, image string) (*storage.Image, error) { - var img *storage.Image - ref, err := is.Transport.ParseStoreReference(store, image) - if err == nil { - img, err = is.Transport.GetStoreImage(store, ref) - } - if err != nil { - img2, err2 := store.Image(image) - if err2 != nil { - if ref == nil { - return nil, errors.Wrapf(err, "error parsing reference to image %q", image) - } - return nil, errors.Wrapf(err, "unable to locate image %q", image) - } - img = img2 - } - return img, nil -} - -// FindImageRef searches for and returns a new types.Image matching the given name or ID in the given store. -func FindImageRef(store storage.Store, image string) (types.Image, error) { - img, err := FindImage(store, image) - if err != nil { - return nil, errors.Wrapf(err, "unable to locate image %q", image) - } - ref, err := is.Transport.ParseStoreReference(store, "@"+img.ID) - if err != nil { - return nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID) - } - imgRef, err := ref.NewImage(nil) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", img.ID) - } - return imgRef, nil -} diff --git a/libpod/layers/layer.go b/libpod/layers/layer.go deleted file mode 100644 index 865cbe70..00000000 --- a/libpod/layers/layer.go +++ /dev/null @@ -1,12 +0,0 @@ -package layers - -import cstorage "github.com/containers/storage" - -// FullID gets the full id of a layer given a partial id or name -func FullID(store cstorage.Store, id string) (string, error) { - layer, err := store.Layer(id) - if err != nil { - return "", err - } - return layer.ID, nil -} diff --git a/libpod/options.go b/libpod/options.go deleted file mode 100644 index 03248274..00000000 --- a/libpod/options.go +++ /dev/null @@ -1,272 +0,0 @@ -package libpod - -import ( - "fmt" - - "github.com/containers/storage" - "github.com/containers/storage/pkg/idtools" - "github.com/pkg/errors" -) - -var ( - ctrNotImplemented = func(c *Container) error { - return fmt.Errorf("NOT IMPLEMENTED") - } -) - -const ( - // IPCNamespace represents the IPC namespace - IPCNamespace = "ipc" - // MountNamespace represents the mount namespace - MountNamespace = "mount" - // NetNamespace represents the network namespace - NetNamespace = "net" - // PIDNamespace represents the PID namespace - PIDNamespace = "pid" - // UserNamespace represents the user namespace - UserNamespace = "user" - // UTSNamespace represents the UTS namespace - UTSNamespace = "uts" -) - -// Runtime Creation Options - -// WithStorageConfig uses the given configuration to set up container storage -// If this is not specified, the system default configuration will be used -// instead -func WithStorageConfig(config storage.StoreOptions) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return ErrRuntimeFinalized - } - - rt.config.StorageConfig.RunRoot = config.RunRoot - rt.config.StorageConfig.GraphRoot = config.GraphRoot - rt.config.StorageConfig.GraphDriverName = config.GraphDriverName - - rt.config.StorageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions)) - copy(rt.config.StorageConfig.GraphDriverOptions, config.GraphDriverOptions) - - rt.config.StorageConfig.UIDMap = make([]idtools.IDMap, len(config.UIDMap)) - copy(rt.config.StorageConfig.UIDMap, config.UIDMap) - - rt.config.StorageConfig.GIDMap = make([]idtools.IDMap, len(config.GIDMap)) -
copy(rt.config.StorageConfig.GIDMap, config.GIDMap) - - return nil - } -} - -// WithImageConfig uses the given configuration to set up image handling -// If this is not specified, the system default configuration will be used -// instead -func WithImageConfig(defaultTransport string, insecureRegistries, registries []string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return ErrRuntimeFinalized - } - - rt.config.ImageDefaultTransport = defaultTransport - - rt.config.InsecureRegistries = make([]string, len(insecureRegistries)) - copy(rt.config.InsecureRegistries, insecureRegistries) - - rt.config.Registries = make([]string, len(registries)) - copy(rt.config.Registries, registries) - - return nil - } -} - -// WithSignaturePolicy specifies the path of a file which decides how trust is -// managed for images we've pulled. -// If this is not specified, the system default configuration will be used -// instead -func WithSignaturePolicy(path string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return ErrRuntimeFinalized - } - - rt.config.SignaturePolicyPath = path - - return nil - } -} - -// WithOCIRuntime specifies an OCI runtime to use for running containers -func WithOCIRuntime(runtimePath string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return ErrRuntimeFinalized - } - - rt.config.RuntimePath = runtimePath - - return nil - } -} - -// WithConmonPath specifies the path to the conmon binary which manages the -// runtime -func WithConmonPath(path string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return ErrRuntimeFinalized - } - - rt.config.ConmonPath = path - - return nil - } -} - -// WithConmonEnv specifies the environment variable list for the conmon process -func WithConmonEnv(environment []string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return ErrRuntimeFinalized - } - - rt.config.ConmonEnvVars = make([]string, len(environment)) - copy(rt.config.ConmonEnvVars, environment) - - return nil - } -} - -// WithCgroupManager specifies the manager implementation name which is used to -// handle cgroups for containers -func WithCgroupManager(manager string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return ErrRuntimeFinalized - } - - rt.config.CgroupManager = manager - - return nil - } -} - -// WithSELinux enables SELinux on the container server -func WithSELinux() RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return ErrRuntimeFinalized - } - - rt.config.SelinuxEnabled = true - - return nil - } -} - -// WithPidsLimit specifies the maximum number of processes each container is -// restricted to -func WithPidsLimit(limit int64) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return ErrRuntimeFinalized - } - - rt.config.PidsLimit = limit - - return nil - } -} - -// Container Creation Options - -// WithRootFSFromPath uses the given path as a container's root filesystem -// No further setup is performed on this path -func WithRootFSFromPath(path string) CtrCreateOption { - return ctrNotImplemented -} - -// WithRootFSFromImage sets up a fresh root filesystem using the given image -// If useImageConfig is specified, image volumes, environment variables, and -// other configuration from the image will be added to the config -func WithRootFSFromImage(image string, useImageConfig bool) CtrCreateOption { - return ctrNotImplemented -} - -// WithSharedNamespaces sets a container to share namespaces with another -// 
container. If the from container belongs to a pod, the new container will -// be added to the pod. -// By default no namespaces are shared. To share a namespace, add the Namespace -// string constant to the map as a key -func WithSharedNamespaces(from *Container, namespaces map[string]string) CtrCreateOption { - return ctrNotImplemented -} - -// WithPod adds the container to a pod -func (r *Runtime) WithPod(pod *Pod) CtrCreateOption { - return func(ctr *Container) error { - if !ctr.valid { - return ErrCtrFinalized - } - - if ctr.pod != nil { - return fmt.Errorf("container has already been added to a pod") - } - - exists, err := r.state.HasPod(pod.ID()) - if err != nil { - return errors.Wrapf(err, "error searching state for pod %s", pod.ID()) - } else if !exists { - return errors.Wrapf(ErrNoSuchPod, "pod %s cannot be found in state", pod.ID()) - } - - if err := pod.addContainer(ctr); err != nil { - return errors.Wrapf(err, "error adding container to pod") - } - - ctr.pod = pod - - return nil - } -} - -// WithLabels adds labels to the container -func WithLabels(labels map[string]string) CtrCreateOption { - return ctrNotImplemented -} - -// WithAnnotations adds annotations to the container -func WithAnnotations(annotations map[string]string) CtrCreateOption { - return ctrNotImplemented -} - -// WithName sets the container's name -func WithName(name string) CtrCreateOption { - return func(ctr *Container) error { - if !ctr.valid { - return ErrCtrFinalized - } - - ctr.name = name - - return nil - } -} - -// WithStopSignal sets the signal that will be sent to stop the container -func WithStopSignal(signal uint) CtrCreateOption { - return ctrNotImplemented -} - -// Pod Creation Options - -// WithPodName sets the name of the pod -func WithPodName(name string) PodCreateOption { - return func(pod *Pod) error { - if pod.valid { - return ErrPodFinalized - } - - pod.name = name - - return nil - } -} diff --git a/libpod/pod.go b/libpod/pod.go deleted file mode 100644 index 451747aa..00000000 --- a/libpod/pod.go +++ /dev/null @@ -1,117 +0,0 @@ -package libpod - -import ( - "sync" - - "github.com/docker/docker/pkg/stringid" - "github.com/pkg/errors" -) - -// Pod represents a group of containers that may share namespaces -type Pod struct { - id string - name string - - containers map[string]*Container - - valid bool - lock sync.RWMutex -} - -// ID retrieves the pod's ID -func (p *Pod) ID() string { - return p.id -} - -// Name retrieves the pod's name -func (p *Pod) Name() string { - return p.name -} - -// Creates a new pod -func newPod() (*Pod, error) { - pod := new(Pod) - pod.id = stringid.GenerateNonCryptoID() - pod.name = pod.id // TODO generate human-readable name here - - pod.containers = make(map[string]*Container) - - return pod, nil -} - -// Adds a container to the pod -// Does not check that container's pod ID is set correctly, or attempt to set -// pod ID after adding -func (p *Pod) addContainer(ctr *Container) error { - p.lock.Lock() - defer p.lock.Unlock() - - if !p.valid { - return ErrPodRemoved - } - - if _, ok := p.containers[ctr.id]; ok { - return errors.Wrapf(ErrCtrExists, "container with ID %s already exists in pod %s", ctr.id, p.id) - } - - p.containers[ctr.id] = ctr - - return nil -} - -// Removes a container from the pod -// Does not perform any checks on the container -func (p *Pod) removeContainer(ctr *Container) error { - p.lock.Lock() - defer p.lock.Unlock() - - if !p.valid { - return ErrPodRemoved - } - - if _, ok := p.containers[ctr.id]; !ok { - return errors.Wrapf(ErrNoSuchCtr, "no container
with id %s in pod %s", ctr.id, p.id) - } - - delete(p.containers, ctr.id) - - return nil -} - -// Start starts all containers within a pod that are not already running -func (p *Pod) Start() error { - return ErrNotImplemented -} - -// Stop stops all containers within a pod that are not already stopped -func (p *Pod) Stop() error { - return ErrNotImplemented -} - -// Kill sends a signal to all running containers within a pod -func (p *Pod) Kill(signal uint) error { - return ErrNotImplemented -} - -// GetContainers retrieves the containers in the pod -func (p *Pod) GetContainers() ([]*Container, error) { - p.lock.RLock() - defer p.lock.RUnlock() - - if !p.valid { - return nil, ErrPodRemoved - } - - ctrs := make([]*Container, 0, len(p.containers)) - for _, ctr := range p.containers { - ctrs = append(ctrs, ctr) - } - - return ctrs, nil -} - -// Status gets the status of all containers in the pod -// TODO This should return a summary of the states of all containers in the pod -func (p *Pod) Status() error { - return ErrNotImplemented -} diff --git a/libpod/runtime.go b/libpod/runtime.go deleted file mode 100644 index cfa96527..00000000 --- a/libpod/runtime.go +++ /dev/null @@ -1,132 +0,0 @@ -package libpod - -import ( - "sync" - - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/server/apparmor" - "github.com/kubernetes-incubator/cri-o/server/seccomp" - "github.com/pkg/errors" - "github.com/ulule/deepcopier" -) - -// A RuntimeOption is a functional option which alters the Runtime created by -// NewRuntime -type RuntimeOption func(*Runtime) error - -// Runtime is the core libpod runtime -type Runtime struct { - config *RuntimeConfig - state State - store storage.Store - imageContext *types.SystemContext - apparmorEnabled bool - seccompEnabled bool - valid bool - lock sync.RWMutex -} - -// RuntimeConfig contains configuration options used to set up the runtime -type RuntimeConfig struct { - StorageConfig storage.StoreOptions - ImageDefaultTransport string - InsecureRegistries []string - Registries []string - SignaturePolicyPath string - RuntimePath string - ConmonPath string - ConmonEnvVars []string - CgroupManager string - SelinuxEnabled bool - PidsLimit int64 -} - -var ( - defaultRuntimeConfig = RuntimeConfig{ - // Leave this empty so containers/storage will use its defaults - StorageConfig: storage.StoreOptions{}, - ImageDefaultTransport: "docker://", - RuntimePath: "/usr/bin/runc", - ConmonPath: "/usr/local/libexec/crio/conmon", - ConmonEnvVars: []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - }, - CgroupManager: "cgroupfs", - SelinuxEnabled: false, - PidsLimit: 1024, - } -) - -// NewRuntime creates a new container runtime -// Options can be passed to override the default configuration for the runtime -func NewRuntime(options ...RuntimeOption) (*Runtime, error) { - runtime := new(Runtime) - runtime.config = new(RuntimeConfig) - - // Copy the default configuration - deepcopier.Copy(defaultRuntimeConfig).To(runtime.config) - - // Overwrite it with user-given configuration options - for _, opt := range options { - if err := opt(runtime); err != nil { - return nil, errors.Wrapf(err, "error configuring runtime") - } - } - - // Set up containers/storage - store, err := storage.GetStore(runtime.config.StorageConfig) - if err != nil { - return nil, err - } - runtime.store = store - - // Set up containers/image - runtime.imageContext = &types.SystemContext{ - SignaturePolicyPath: 
runtime.config.SignaturePolicyPath, - } - - runtime.seccompEnabled = seccomp.IsEnabled() - runtime.apparmorEnabled = apparmor.IsEnabled() - - // Mark the runtime as valid - ready to be used, cannot be modified - // further - runtime.valid = true - - return runtime, nil -} - -// GetConfig returns a copy of the configuration used by the runtime -func (r *Runtime) GetConfig() *RuntimeConfig { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil - } - - config := new(RuntimeConfig) - - // Copy so the caller won't be able to modify the actual config - deepcopier.Copy(r.config).To(config) - - return config -} - -// Shutdown shuts down the runtime and associated containers and storage -// If force is true, containers and mounted storage will be shut down before -// cleaning up; if force is false, an error will be returned if there are -// still containers running or mounted -func (r *Runtime) Shutdown(force bool) error { - r.lock.Lock() - defer r.lock.Unlock() - - if !r.valid { - return ErrRuntimeStopped - } - - r.valid = false - - _, err := r.store.Shutdown(force) - return err -} diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go deleted file mode 100644 index 4b506233..00000000 --- a/libpod/runtime_ctr.go +++ /dev/null @@ -1,160 +0,0 @@ -package libpod - -import ( - "github.com/containers/storage" - spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -// Contains the public Runtime API for containers - -// A CtrCreateOption is a functional option which alters the Container created -// by NewContainer -type CtrCreateOption func(*Container) error - -// ContainerFilter is a function to determine whether a container is included -// in command output. Containers to be outputted are tested using the function. -// A true return will include the container, a false return will exclude it. 
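Aside (illustration only, not part of the patch): any `func(*Container) bool` satisfies the filter contract described above. A minimal, self-contained sketch with a stand-in Container type; the real libpod type lives in the deleted container.go and is assumed here to expose an ID() accessor:

```go
package main

import (
	"fmt"
	"strings"
)

// Container is a stand-in for libpod's container type, assumed to expose ID().
type Container struct{ id string }

func (c *Container) ID() string { return c.id }

// ContainerFilter mirrors the predicate type described above.
type ContainerFilter func(*Container) bool

// byIDPrefix keeps only containers whose ID starts with the given prefix.
func byIDPrefix(prefix string) ContainerFilter {
	return func(c *Container) bool { return strings.HasPrefix(c.ID(), prefix) }
}

func main() {
	f := byIDPrefix("deadbeef")
	fmt.Println(f(&Container{id: "deadbeef42"})) // true: included in output
	fmt.Println(f(&Container{id: "cafe1234"}))   // false: excluded
}
```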
-type ContainerFilter func(*Container) bool - -// NewContainer creates a new container from a given OCI config -func (r *Runtime) NewContainer(spec *spec.Spec, options ...CtrCreateOption) (*Container, error) { - r.lock.Lock() - defer r.lock.Unlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - - ctr, err := newContainer(spec) - if err != nil { - return nil, err - } - - for _, option := range options { - if err := option(ctr); err != nil { - return nil, errors.Wrapf(err, "error running container create option") - } - } - - ctr.valid = true - - if err := r.state.AddContainer(ctr); err != nil { - // If we joined a pod, remove ourself from it - if ctr.pod != nil { - if err2 := ctr.pod.removeContainer(ctr); err2 != nil { - return nil, errors.Wrapf(err, "error adding new container to state, container could not be removed from pod %s", ctr.pod.ID()) - } - } - - // TODO: Might be worth making an effort to detect duplicate IDs - // We can recover from that by generating a new ID for the - // container - return nil, errors.Wrapf(err, "error adding new container to state") - } - - return ctr, nil -} - -// RemoveContainer removes the given container -// If force is specified, the container will be stopped first -// Otherwise, RemoveContainer will return an error if the container is running -func (r *Runtime) RemoveContainer(c *Container, force bool) error { - return ErrNotImplemented -} - -// GetContainer retrieves a container by its ID -func (r *Runtime) GetContainer(id string) (*Container, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - - return r.state.GetContainer(id) -} - -// HasContainer checks if a container with the given ID is present -func (r *Runtime) HasContainer(id string) (bool, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return false, ErrRuntimeStopped - } - - return r.state.HasContainer(id) -} - -// LookupContainer looks up a container by its name or a partial ID -// If a partial ID is not unique, an error will be returned -func (r *Runtime) LookupContainer(idOrName string) (*Container, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - - return r.state.LookupContainer(idOrName) -} - -// GetContainers retrieves all containers from the state -// Filters can be provided which will determine what containers are included in -// the output. 
Multiple filters are handled by ANDing their output, so only -// containers matching all filters are returned -func (r *Runtime) GetContainers(filters ...ContainerFilter) ([]*Container, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - - ctrs, err := r.state.GetAllContainers() - if err != nil { - return nil, err - } - - ctrsFiltered := make([]*Container, 0, len(ctrs)) - - for _, ctr := range ctrs { - include := true - for _, filter := range filters { - include = include && filter(ctr) - } - - if include { - ctrsFiltered = append(ctrsFiltered, ctr) - } - } - - return ctrsFiltered, nil -} - -// getContainersWithImage returns a list of containers referencing imageID -func (r *Runtime) getContainersWithImage(imageID string) ([]storage.Container, error) { - var matchingContainers []storage.Container - containers, err := r.store.Containers() - if err != nil { - return nil, err - } - - for _, ctr := range containers { - if ctr.ImageID == imageID { - matchingContainers = append(matchingContainers, ctr) - } - } - return matchingContainers, nil -} - -// removeMultipleContainers deletes a list of containers from the store -func (r *Runtime) removeMultipleContainers(containers []storage.Container) error { - for _, ctr := range containers { - if err := r.store.DeleteContainer(ctr.ID); err != nil { - return errors.Wrapf(err, "could not remove container %q", ctr) - } - } - return nil -} diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go deleted file mode 100644 index 69fa5606..00000000 --- a/libpod/runtime_img.go +++ /dev/null @@ -1,679 +0,0 @@ -package libpod - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "syscall" - "time" - - cp "github.com/containers/image/copy" - dockerarchive "github.com/containers/image/docker/archive" - "github.com/containers/image/docker/reference" - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/manifest" - ociarchive "github.com/containers/image/oci/archive" - "github.com/containers/image/signature" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/kubernetes-incubator/cri-o/libpod/common" - digest "github.com/opencontainers/go-digest" - ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// Runtime API - -const ( - // DefaultRegistry is a prefix that we apply to an image name - // to check docker hub first for the image - DefaultRegistry = "docker://" -) - -var ( - // DockerArchive is the transport we prepend to an image name - // when saving to docker-archive - DockerArchive = dockerarchive.Transport.Name() - // OCIArchive is the transport we prepend to an image name - // when saving to oci-archive - OCIArchive = ociarchive.Transport.Name() -) - -// CopyOptions contains the options given when pushing or pulling images -type CopyOptions struct { - // Compression specifies the type of compression which is applied to - // layer blobs. The default is to not use compression, but - // archive.Gzip is recommended. - Compression archive.Compression - // DockerRegistryOptions encapsulates settings that affect how we - // connect or authenticate to a remote registry to which we want to - // push the image. 
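Aside (illustration only): the filtering loop in GetContainers above keeps an item only when every filter accepts it, and keeps everything when no filters are given. A self-contained sketch of that AND semantics over plain ints:

```go
package main

import "fmt"

// filter is a stand-in for the ContainerFilter type described above.
type filter func(int) bool

// applyAll mirrors GetContainers' semantics: an item is kept only if every
// filter accepts it (logical AND); with no filters at all, everything is kept.
func applyAll(items []int, filters ...filter) []int {
	kept := make([]int, 0, len(items))
	for _, it := range items {
		include := true
		for _, f := range filters {
			include = include && f(it)
		}
		if include {
			kept = append(kept, it)
		}
	}
	return kept
}

func main() {
	even := func(n int) bool { return n%2 == 0 }
	big := func(n int) bool { return n > 10 }
	fmt.Println(applyAll([]int{2, 4, 12, 15, 20}, even, big)) // [12 20]
}
```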
- common.DockerRegistryOptions - // SigningOptions encapsulates settings that control whether or not we - // strip or add signatures to the image when pushing (uploading) the - // image to a registry. - common.SigningOptions - - // SigningPolicyPath this points to a alternative signature policy file, used mainly for testing - SignaturePolicyPath string -} - -// Image API - -// ImageFilterParams contains the filter options that may be given when outputting images -type ImageFilterParams struct { - Dangling string - Label string - BeforeImage time.Time - SinceImage time.Time - ReferencePattern string - ImageName string - ImageInput string -} - -// ImageFilter is a function to determine whether an image is included in -// command output. Images to be outputted are tested using the function. A true -// return will include the image, a false return will exclude it. -type ImageFilter func(*storage.Image, *types.ImageInspectInfo) bool - -// PullImage pulls an image from configured registries -// By default, only the latest tag (or a specific tag if requested) will be -// pulled. If allTags is true, all tags for the requested image will be pulled. -// Signature validation will be performed if the Runtime has been appropriately -// configured -func (r *Runtime) PullImage(imgName string, allTags bool, signaturePolicyPath string, reportWriter io.Writer) error { - r.lock.Lock() - defer r.lock.Unlock() - - if !r.valid { - return ErrRuntimeStopped - } - - // PullImage copies the image from the source to the destination - var ( - images []string - ) - - if signaturePolicyPath == "" { - signaturePolicyPath = r.config.SignaturePolicyPath - } - - sc := common.GetSystemContext(signaturePolicyPath, "") - - srcRef, err := alltransports.ParseImageName(imgName) - if err != nil { - defaultName := DefaultRegistry + imgName - srcRef2, err2 := alltransports.ParseImageName(defaultName) - if err2 != nil { - return errors.Errorf("error parsing image name %q: %v", defaultName, err2) - } - srcRef = srcRef2 - } - - splitArr := strings.Split(imgName, ":") - archFile := splitArr[len(splitArr)-1] - - // supports pulling from docker-archive, oci, and registries - if srcRef.Transport().Name() == DockerArchive { - tarSource := tarfile.NewSource(archFile) - manifest, err := tarSource.LoadTarManifest() - if err != nil { - return errors.Errorf("error retrieving manifest.json: %v", err) - } - // to pull all the images stored in one tar file - for i := range manifest { - if manifest[i].RepoTags != nil { - images = append(images, manifest[i].RepoTags[0]) - } else { - // create an image object and use the hex value of the digest as the image ID - // for parsing the store reference - newImg, err := srcRef.NewImage(sc) - if err != nil { - return err - } - defer newImg.Close() - digest := newImg.ConfigInfo().Digest - if err := digest.Validate(); err == nil { - images = append(images, "@"+digest.Hex()) - } else { - return errors.Wrapf(err, "error getting config info") - } - } - } - } else if srcRef.Transport().Name() == OCIArchive { - // retrieve the manifest from index.json to access the image name - manifest, err := ociarchive.LoadManifestDescriptor(srcRef) - if err != nil { - return errors.Wrapf(err, "error loading manifest for %q", srcRef) - } - - if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" { - return errors.Errorf("error, archive doesn't have a name annotation. 
Cannot store image with no name") - } - images = append(images, manifest.Annotations["org.opencontainers.image.ref.name"]) - } else { - images = append(images, imgName) - } - - policy, err := signature.DefaultPolicy(r.imageContext) - if err != nil { - return err - } - - policyContext, err := signature.NewPolicyContext(policy) - if err != nil { - return err - } - defer policyContext.Destroy() - - copyOptions := common.GetCopyOptions(reportWriter, signaturePolicyPath, nil, nil, common.SigningOptions{}) - for _, image := range images { - reference := image - if srcRef.DockerReference() != nil { - reference = srcRef.DockerReference().String() - } - destRef, err := is.Transport.ParseStoreReference(r.store, reference) - if err != nil { - return errors.Errorf("error parsing dest reference name: %v", err) - } - if err = cp.Image(policyContext, destRef, srcRef, copyOptions); err != nil { - return errors.Errorf("error loading image %q: %v", image, err) - } - } - return nil -} - -// PushImage pushes the given image to a location described by the given path -func (r *Runtime) PushImage(source string, destination string, options CopyOptions, reportWriter io.Writer) error { - r.lock.Lock() - defer r.lock.Unlock() - - if !r.valid { - return ErrRuntimeStopped - } - - // PushImage pushes the src image to the destination - //func PushImage(source, destination string, options CopyOptions) error { - if source == "" || destination == "" { - return errors.Wrapf(syscall.EINVAL, "source and destination image names must be specified") - } - - // Get the destination Image Reference - dest, err := alltransports.ParseImageName(destination) - if err != nil { - return errors.Wrapf(err, "error getting destination imageReference for %q", destination) - } - - signaturePolicyPath := r.config.SignaturePolicyPath - if options.SignaturePolicyPath != "" { - signaturePolicyPath = options.SignaturePolicyPath - } - - policyContext, err := common.GetPolicyContext(signaturePolicyPath) - if err != nil { - return errors.Wrapf(err, "Could not get default policy context for signature policy path %q", signaturePolicyPath) - } - defer policyContext.Destroy() - // Look up the image name and its layer, then build the imagePushData from - // the image - img, err := r.getImage(source) - if err != nil { - return errors.Wrapf(err, "error locating image %q for importing settings", source) - } - cd, err := r.ImportCopyDataFromImage(r.imageContext, img.ID, "", "") - if err != nil { - return err - } - // Give the image we're producing the same ancestors as its source image - cd.FromImage = cd.Docker.ContainerConfig.Image - cd.FromImageID = string(cd.Docker.Parent) - - // Prep the layers and manifest for export - src, err := cd.MakeImageRef(manifest.GuessMIMEType(cd.Manifest), options.Compression, img.Names, img.TopLayer, nil) - if err != nil { - return errors.Wrapf(err, "error copying layers and metadata") - } - - copyOptions := common.GetCopyOptions(reportWriter, signaturePolicyPath, nil, &options.DockerRegistryOptions, options.SigningOptions) - - // Copy the image to the remote destination - err = cp.Image(policyContext, dest, src, copyOptions) - if err != nil { - return errors.Wrapf(err, "Error copying image to the remote destination") - } - return nil -} - -// TagImage adds a tag to the given image -func (r *Runtime) TagImage(image *storage.Image, tag string) error { - r.lock.Lock() - defer r.lock.Unlock() - - if !r.valid { - return ErrRuntimeStopped - } - - tags, err := r.store.Names(image.ID) - if err != nil { - return err - } - for _, key 
:= range tags { - if key == tag { - return nil - } - } - tags = append(tags, tag) - return r.store.SetNames(image.ID, tags) -} - -// UntagImage removes a tag from the given image -func (r *Runtime) UntagImage(image *storage.Image, tag string) (string, error) { - r.lock.Lock() - defer r.lock.Unlock() - - if !r.valid { - return "", ErrRuntimeStopped - } - - tags, err := r.store.Names(image.ID) - if err != nil { - return "", err - } - for i, key := range tags { - if key == tag { - tags[i] = tags[len(tags)-1] - tags = tags[:len(tags)-1] - break - } - } - if err = r.store.SetNames(image.ID, tags); err != nil { - return "", err - } - return tag, nil -} - -// RemoveImage deletes an image from local storage -// Images being used by running containers can only be removed if force=true -func (r *Runtime) RemoveImage(image *storage.Image, force bool) (string, error) { - r.lock.Lock() - defer r.lock.Unlock() - - if !r.valid { - return "", ErrRuntimeStopped - } - - containersWithImage, err := r.getContainersWithImage(image.ID) - if err != nil { - return "", errors.Wrapf(err, "error getting containers for image %q", image.ID) - } - if len(containersWithImage) > 0 && len(image.Names) <= 1 { - if force { - if err := r.removeMultipleContainers(containersWithImage); err != nil { - return "", err - } - } else { - for _, ctr := range containersWithImage { - return "", fmt.Errorf("Could not remove image %q (must force) - container %q is using its reference image", image.ID, ctr.ImageID) - } - } - } - - if len(image.Names) > 1 && !force { - return "", fmt.Errorf("unable to delete %s (must force) - image is referred to in multiple tags", image.ID) - } - // If it is forced, we have to untag the image so that it can be deleted - image.Names = image.Names[:0] - - _, err = r.store.DeleteImage(image.ID, true) - if err != nil { - return "", err - } - return image.ID, nil -} - -// GetImage retrieves an image matching the given name or hash from system -// storage -// If no matching image can be found, an error is returned -func (r *Runtime) GetImage(image string) (*storage.Image, error) { - r.lock.Lock() - defer r.lock.Unlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - return r.getImage(image) -} - -func (r *Runtime) getImage(image string) (*storage.Image, error) { - var img *storage.Image - ref, err := is.Transport.ParseStoreReference(r.store, image) - if err == nil { - img, err = is.Transport.GetStoreImage(r.store, ref) - } - if err != nil { - img2, err2 := r.store.Image(image) - if err2 != nil { - if ref == nil { - return nil, errors.Wrapf(err, "error parsing reference to image %q", image) - } - return nil, errors.Wrapf(err, "unable to locate image %q", image) - } - img = img2 - } - return img, nil -} - -// GetImageRef searches for and returns a new types.Image matching the given name or ID in the given store. 
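Aside (illustration only): UntagImage above removes a tag with the classic Go swap-and-truncate idiom, which deletes an element in O(1) at the cost of ordering, acceptable here because the tag list is unordered. A standalone sketch:

```go
package main

import "fmt"

// removeUnordered deletes the first occurrence of v from s in O(1) by
// swapping in the final element; element order is not preserved, which
// is fine for an unordered tag list.
func removeUnordered(s []string, v string) []string {
	for i, item := range s {
		if item == v {
			s[i] = s[len(s)-1]
			return s[:len(s)-1]
		}
	}
	return s
}

func main() {
	tags := []string{"latest", "v1", "stable"}
	fmt.Println(removeUnordered(tags, "v1")) // [latest stable]
}
```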
-func (r *Runtime) GetImageRef(image string) (types.Image, error) { - r.lock.Lock() - defer r.lock.Unlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - return r.getImageRef(image) - -} - -func (r *Runtime) getImageRef(image string) (types.Image, error) { - img, err := r.getImage(image) - if err != nil { - return nil, errors.Wrapf(err, "unable to locate image %q", image) - } - ref, err := is.Transport.ParseStoreReference(r.store, "@"+img.ID) - if err != nil { - return nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID) - } - imgRef, err := ref.NewImage(nil) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", img.ID) - } - return imgRef, nil -} - -// GetImages retrieves all images present in storage -// Filters can be provided which will determine which images are included in the -// output. Multiple filters are handled by ANDing their output, so only images -// matching all filters are included -func (r *Runtime) GetImages(params *ImageFilterParams, filters ...ImageFilter) ([]*storage.Image, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - - images, err := r.store.Images() - if err != nil { - return nil, err - } - - var imagesFiltered []*storage.Image - - for _, img := range images { - info, err := r.getImageInspectInfo(img) - if err != nil { - return nil, err - } - var names []string - if len(img.Names) > 0 { - names = img.Names - } else { - names = append(names, "") - } - for _, name := range names { - include := true - if params != nil { - params.ImageName = name - } - for _, filter := range filters { - include = include && filter(&img, info) - } - - if include { - newImage := img - newImage.Names = []string{name} - imagesFiltered = append(imagesFiltered, &newImage) - } - } - } - - return imagesFiltered, nil -} - -// GetHistory gets the history of an image and information about its layers -func (r *Runtime) GetHistory(image string) ([]ociv1.History, []types.BlobInfo, string, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, nil, "", ErrRuntimeStopped - } - - img, err := r.getImage(image) - if err != nil { - return nil, nil, "", errors.Wrapf(err, "no such image %q", image) - } - - src, err := r.getImageRef(image) - if err != nil { - return nil, nil, "", errors.Wrapf(err, "error instantiating image %q", image) - } - - oci, err := src.OCIConfig() - if err != nil { - return nil, nil, "", err - } - - return oci.History, src.LayerInfos(), img.ID, nil -} - -// ImportImage imports an OCI format image archive into storage as an image -func (r *Runtime) ImportImage(path string) (*storage.Image, error) { - return nil, ErrNotImplemented -} - -// GetImageInspectInfo returns the inspect information of an image -func (r *Runtime) GetImageInspectInfo(image storage.Image) (*types.ImageInspectInfo, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - return r.getImageInspectInfo(image) -} - -func (r *Runtime) getImageInspectInfo(image storage.Image) (*types.ImageInspectInfo, error) { - img, err := r.getImageRef(image.ID) - if err != nil { - return nil, err - } - return img.Inspect() -} - -// ParseImageFilter takes a set of images and a filter string as input, and returns the libpod.ImageFilterParams struct -func (r *Runtime) ParseImageFilter(imageInput, filter string) (*ImageFilterParams, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - - if filter == 
"" && imageInput == "" { - return nil, nil - } - - var params ImageFilterParams - params.ImageInput = imageInput - - if filter == "" && imageInput != "" { - return ¶ms, nil - } - - images, err := r.store.Images() - if err != nil { - return nil, err - } - - filterStrings := strings.Split(filter, ",") - for _, param := range filterStrings { - pair := strings.SplitN(param, "=", 2) - switch strings.TrimSpace(pair[0]) { - case "dangling": - if common.IsValidBool(pair[1]) { - params.Dangling = pair[1] - } else { - return nil, fmt.Errorf("invalid filter: '%s=[%s]'", pair[0], pair[1]) - } - case "label": - params.Label = pair[1] - case "before": - if img, err := findImageInSlice(images, pair[1]); err == nil { - info, err := r.GetImageInspectInfo(img) - if err != nil { - return nil, err - } - params.BeforeImage = info.Created - } else { - return nil, fmt.Errorf("no such id: %s", pair[0]) - } - case "since": - if img, err := findImageInSlice(images, pair[1]); err == nil { - info, err := r.GetImageInspectInfo(img) - if err != nil { - return nil, err - } - params.SinceImage = info.Created - } else { - return nil, fmt.Errorf("no such id: %s``", pair[0]) - } - case "reference": - params.ReferencePattern = pair[1] - default: - return nil, fmt.Errorf("invalid filter: '%s'", pair[0]) - } - } - return ¶ms, nil -} - -// InfoAndDigestAndSize returns the inspection info and size of the image in the given -// store and the digest of its manifest, if it has one, or "" if it doesn't. -func (r *Runtime) InfoAndDigestAndSize(img storage.Image) (*types.ImageInspectInfo, digest.Digest, int64, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, "", -1, ErrRuntimeStopped - } - - imgRef, err := r.getImageRef("@" + img.ID) - if err != nil { - return nil, "", -1, errors.Wrapf(err, "error reading image %q", img.ID) - } - defer imgRef.Close() - return infoAndDigestAndSize(imgRef) -} - -func infoAndDigestAndSize(imgRef types.Image) (*types.ImageInspectInfo, digest.Digest, int64, error) { - imgSize, err := imgRef.Size() - if err != nil { - return nil, "", -1, errors.Wrapf(err, "error reading size of image %q", transports.ImageName(imgRef.Reference())) - } - manifest, _, err := imgRef.Manifest() - if err != nil { - return nil, "", -1, errors.Wrapf(err, "error reading manifest for image %q", transports.ImageName(imgRef.Reference())) - } - manifestDigest := digest.Digest("") - if len(manifest) > 0 { - manifestDigest = digest.Canonical.FromBytes(manifest) - } - info, err := imgRef.Inspect() - if err != nil { - return nil, "", -1, errors.Wrapf(err, "error inspecting image %q", transports.ImageName(imgRef.Reference())) - } - return info, manifestDigest, imgSize, nil -} - -// MatchesID returns true if argID is a full or partial match for id -func MatchesID(id, argID string) bool { - return strings.HasPrefix(argID, id) -} - -// MatchesReference returns true if argName is a full or partial match for name -// Partial matches will register only if they match the most specific part of the name available -// For example, take the image docker.io/library/redis:latest -// redis, library/redis, docker.io/library/redis, redis:latest, etc. 
will match -// But redis:alpine, ry/redis, library, and io/library/redis will not -func MatchesReference(name, argName string) bool { - if argName == "" { - return false - } - splitName := strings.Split(name, ":") - // If the arg contains a tag, we handle it differently than if it does not - if strings.Contains(argName, ":") { - splitArg := strings.Split(argName, ":") - return strings.HasSuffix(splitName[0], splitArg[0]) && (splitName[1] == splitArg[1]) - } - return strings.HasSuffix(splitName[0], argName) -} - -// ParseImageNames parses the names we've stored with an image into a list of -// tagged references and a list of references which contain digests. -func ParseImageNames(names []string) (tags, digests []string, err error) { - for _, name := range names { - if named, err := reference.ParseNamed(name); err == nil { - if digested, ok := named.(reference.Digested); ok { - canonical, err := reference.WithDigest(named, digested.Digest()) - if err == nil { - digests = append(digests, canonical.String()) - } - } else { - if reference.IsNameOnly(named) { - named = reference.TagNameOnly(named) - } - if tagged, ok := named.(reference.Tagged); ok { - namedTagged, err := reference.WithTag(named, tagged.Tag()) - if err == nil { - tags = append(tags, namedTagged.String()) - } - } - } - } - } - return tags, digests, nil -} - -func annotations(manifest []byte, manifestType string) map[string]string { - annotations := make(map[string]string) - switch manifestType { - case ociv1.MediaTypeImageManifest: - var m ociv1.Manifest - if err := json.Unmarshal(manifest, &m); err == nil { - for k, v := range m.Annotations { - annotations[k] = v - } - } - } - return annotations -} - -func findImageInSlice(images []storage.Image, ref string) (storage.Image, error) { - for _, image := range images { - if MatchesID(image.ID, ref) { - return image, nil - } - for _, name := range image.Names { - if MatchesReference(name, ref) { - return image, nil - } - } - } - return storage.Image{}, errors.New("could not find image") -} diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go deleted file mode 100644 index e14c1b45..00000000 --- a/libpod/runtime_pod.go +++ /dev/null @@ -1,122 +0,0 @@ -package libpod - -import ( - "github.com/pkg/errors" -) - -// Contains the public Runtime API for pods - -// A PodCreateOption is a functional option which alters the Pod created by -// NewPod -type PodCreateOption func(*Pod) error - -// PodFilter is a function to determine whether a pod is included in command -// output. Pods to be outputted are tested using the function. A true return -// will include the pod, a false return will exclude it. 
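Aside (illustration only): the matching rules documented above are easiest to check with concrete inputs. This sketch inlines MatchesReference exactly as written and exercises it:

```go
package main

import (
	"fmt"
	"strings"
)

// MatchesReference is reproduced from the code above.
// Caveats: splitName[1] assumes name carries a tag, and plain suffix matching
// also accepts mid-component suffixes such as "ry/redis", which the comment
// above says should not match.
func MatchesReference(name, argName string) bool {
	if argName == "" {
		return false
	}
	splitName := strings.Split(name, ":")
	if strings.Contains(argName, ":") {
		splitArg := strings.Split(argName, ":")
		return strings.HasSuffix(splitName[0], splitArg[0]) && (splitName[1] == splitArg[1])
	}
	return strings.HasSuffix(splitName[0], argName)
}

func main() {
	name := "docker.io/library/redis:latest"
	for _, arg := range []string{"redis", "library/redis", "redis:latest", "redis:alpine", "library"} {
		fmt.Printf("%-14s -> %v\n", arg, MatchesReference(name, arg))
	}
	// redis          -> true
	// library/redis  -> true
	// redis:latest   -> true
	// redis:alpine   -> false
	// library        -> false
}
```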
-type PodFilter func(*Pod) bool - -// NewPod makes a new, empty pod -func (r *Runtime) NewPod(options ...PodCreateOption) (*Pod, error) { - r.lock.Lock() - defer r.lock.Unlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - - pod, err := newPod() - if err != nil { - return nil, errors.Wrapf(err, "error creating pod") - } - - for _, option := range options { - if err := option(pod); err != nil { - return nil, errors.Wrapf(err, "error running pod create option") - } - } - - pod.valid = true - - if err := r.state.AddPod(pod); err != nil { - return nil, errors.Wrapf(err, "error adding pod to state") - } - - return nil, ErrNotImplemented -} - -// RemovePod removes a pod and all containers in it -// If force is specified, all containers in the pod will be stopped first -// Otherwise, RemovePod will return an error if any container in the pod is running -// Remove acts atomically, removing all containers or no containers -func (r *Runtime) RemovePod(p *Pod, force bool) error { - return ErrNotImplemented -} - -// GetPod retrieves a pod by its ID -func (r *Runtime) GetPod(id string) (*Pod, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - - return r.state.GetPod(id) -} - -// HasPod checks to see if a pod with the given ID exists -func (r *Runtime) HasPod(id string) (bool, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return false, ErrRuntimeStopped - } - - return r.state.HasPod(id) -} - -// LookupPod retrieves a pod by its name or a partial ID -// If a partial ID is not unique, an error will be returned -func (r *Runtime) LookupPod(idOrName string) (*Pod, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - - return r.state.LookupPod(idOrName) -} - -// Pods retrieves all pods -// Filters can be provided which will determine which pods are included in the -// output. 
Multiple filters are handled by ANDing their output, so only pods -// matching all filters are returned -func (r *Runtime) Pods(filters ...PodFilter) ([]*Pod, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, ErrRuntimeStopped - } - - pods, err := r.state.GetAllPods() - if err != nil { - return nil, err - } - - podsFiltered := make([]*Pod, 0, len(pods)) - for _, pod := range pods { - include := true - for _, filter := range filters { - include = include && filter(pod) - } - - if include { - podsFiltered = append(podsFiltered, pod) - } - } - - return podsFiltered, nil -} diff --git a/libpod/state.go b/libpod/state.go deleted file mode 100644 index e7194c32..00000000 --- a/libpod/state.go +++ /dev/null @@ -1,37 +0,0 @@ -package libpod - -// State is a storage backend for libpod's current state -type State interface { - // Accepts full ID of container - GetContainer(id string) (*Container, error) - // Accepts full or partial IDs (as long as they are unique) and names - LookupContainer(idOrName string) (*Container, error) - // Checks if a container with the given ID is present in the state - HasContainer(id string) (bool, error) - // Adds container to state - // If the container belongs to a pod, that pod must already be present - // in the state when the container is added - AddContainer(ctr *Container) error - // Removes container from state - // If the container belongs to a pod, it will be removed from the pod - // as well - RemoveContainer(ctr *Container) error - // Retrieves all containers presently in state - GetAllContainers() ([]*Container, error) - - // Accepts full ID of pod - GetPod(id string) (*Pod, error) - // Accepts full or partial IDs (as long as they are unique) and names - LookupPod(idOrName string) (*Pod, error) - // Checks if a pod with the given ID is present in the state - HasPod(id string) (bool, error) - // Adds pod to state - // Any containers within the pod not already in the state will be added - // with it - AddPod(pod *Pod) error - // Removes pod from state - // All containers within the pod will also be removed - RemovePod(pod *Pod) error - // Retrieves all pods presently in state - GetAllPods() ([]*Pod, error) -} diff --git a/oci/container.go b/oci/container.go index 197c3d85..c71152bf 100644 --- a/oci/container.go +++ b/oci/container.go @@ -42,12 +42,14 @@ type Container struct { // this is the /var/run/storage/... directory, erased on reboot bundlePath string // this is the /var/lib/storage/... directory - dir string - stopSignal string - imageName string - imageRef string - volumes []ContainerVolume - mountPoint string + dir string + stopSignal string + imageName string + imageRef string + volumes []ContainerVolume + mountPoint string + seccompProfilePath string + spec *specs.Spec } // ContainerVolume is a bind mount for the container. @@ -99,6 +101,16 @@ func NewContainer(id string, name string, bundlePath string, logPath string, net return c, nil } +// SetSpec loads the OCI spec in the container struct +func (c *Container) SetSpec(s *specs.Spec) { + c.spec = s +} + +// Spec returns a copy of the spec for the container +func (c *Container) Spec() specs.Spec { + return *c.spec +} + // GetStopSignal returns the container's own stop signal configured from the // image configuration or the default one. 
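Aside on the new Spec() accessor above: returning *c.spec by value copies only the top-level struct, so nested pointers (Process, Linux, and so on) are still shared with the container. A sketch of that shallow-copy behavior using the runtime-spec types:

```go
package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	orig := &specs.Spec{Version: "1.0.0", Process: &specs.Process{Cwd: "/"}}

	// Dereferencing copies the top-level struct, which is what Spec() returns.
	copySpec := *orig

	copySpec.Version = "mutated"  // top-level field: affects the copy only
	copySpec.Process.Cwd = "/tmp" // nested pointer: still shared with orig

	fmt.Println(orig.Version)     // 1.0.0
	fmt.Println(orig.Process.Cwd) // /tmp
}
```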
func (c *Container) GetStopSignal() string {
@@ -145,6 +157,16 @@ func (c *Container) ID() string {
 	return c.id
 }
 
+// SetSeccompProfilePath sets the seccomp profile path
+func (c *Container) SetSeccompProfilePath(pp string) {
+	c.seccompProfilePath = pp
+}
+
+// SeccompProfilePath returns the seccomp profile path
+func (c *Container) SeccompProfilePath() string {
+	return c.seccompProfilePath
+}
+
 // BundlePath returns the bundlePath of the container.
 func (c *Container) BundlePath() string {
 	return c.bundlePath
diff --git a/oci/memory_store.go b/oci/memory_store.go
index 6223ce7f..3f0cac55 100644
--- a/oci/memory_store.go
+++ b/oci/memory_store.go
@@ -25,8 +25,9 @@ func (c *memoryStore) Add(id string, cont *Container) {
 
 // Get returns a container from the store by id.
 func (c *memoryStore) Get(id string) *Container {
+	var res *Container
 	c.RLock()
-	res := c.s[id]
+	res = c.s[id]
 	c.RUnlock()
 	return res
 }
diff --git a/oci/oci.go b/oci/oci.go
index 2e72b9cf..658079a3 100644
--- a/oci/oci.go
+++ b/oci/oci.go
@@ -17,6 +17,7 @@ import (
 	"github.com/kubernetes-incubator/cri-o/utils"
 	rspec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/net/context"
 	"golang.org/x/sys/unix"
 	kwait "k8s.io/apimachinery/pkg/util/wait"
 )
@@ -39,6 +40,12 @@ const (
 	SystemdCgroupsManager = "systemd"
 	// ContainerExitsDir is the location of container exit dirs
 	ContainerExitsDir = "/var/run/crio/exits"
+	// ContainerAttachSocketDir is the location for container attach sockets
+	ContainerAttachSocketDir = "/var/run/crio"
+
+	// killContainerTimeout is the timeout that we wait for the container to
+	// be SIGKILLed.
+	killContainerTimeout = 2 * time.Minute
 )
 
 // New creates a new Runtime with options provided
@@ -172,6 +179,7 @@ func (r *Runtime) CreateContainer(c *Container, cgroupParent string) (err error)
 	args = append(args, "-p", filepath.Join(c.bundlePath, "pidfile"))
 	args = append(args, "-l", c.logPath)
 	args = append(args, "--exit-dir", r.containerExitsDir)
+	args = append(args, "--socket-dir-path", ContainerAttachSocketDir)
 	if r.logSizeMax >= 0 {
 		args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax))
 	}
@@ -224,11 +232,12 @@ func (r *Runtime) CreateContainer(c *Container, cgroupParent string) (err error)
 		if err != nil {
 			logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
 		} else {
-			// XXX: this defer does nothing as the cgroup can't be deleted cause
-			// it contains the conmon pid in tasks
-			// we need to remove this defer and delete the cgroup once conmon exits
-			// maybe need a conmon monitor?
-			defer control.Delete()
+			// Here we should defer a crio-conmon- cgroup hierarchy deletion, but it will
+			// always fail as conmon's pid is still there.
+			// Fortunately, kubelet takes care of deleting this for us, so the leak will
+			// only happen in the corner case where one does a manual deletion of the container
+			// through e.g. runc. This should be handled by implementing a conmon monitoring
+			// routine that does the cgroup cleanup once conmon is terminated.
 			if err := control.Add(cgroups.Process{Pid: cmd.Process.Pid}); err != nil {
 				logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
 			}
@@ -350,9 +359,9 @@ func parseLog(log []byte) (stdout, stderr []byte) {
 			continue
 		}
 
-		// The format of log lines is "DATE pipe REST".
-		parts := bytes.SplitN(line, []byte{' '}, 3)
-		if len(parts) < 3 {
+		// The format of log lines is "DATE pipe LogTag REST". 
+ parts := bytes.SplitN(line, []byte{' '}, 4) + if len(parts) < 4 { // Ignore the line if it's formatted incorrectly, but complain // about it so it can be debugged. logrus.Warnf("hit invalid log format: %q", string(line)) @@ -360,7 +369,15 @@ func parseLog(log []byte) (stdout, stderr []byte) { } pipe := string(parts[1]) - content := parts[2] + content := parts[3] + + linetype := string(parts[2]) + if linetype == "P" { + contentLen := len(content) + if content[contentLen-1] == '\n' { + content = content[:contentLen-1] + } + } switch pipe { case "stdout": @@ -406,15 +423,6 @@ func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp os.RemoveAll(logPath) }() - f, err := ioutil.TempFile("", "exec-process") - if err != nil { - return nil, ExecSyncError{ - ExitCode: -1, - Err: err, - } - } - defer os.RemoveAll(f.Name()) - var args []string args = append(args, "-c", c.id) args = append(args, "-r", r.Path(c)) @@ -428,28 +436,18 @@ func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp args = append(args, fmt.Sprintf("%d", timeout)) } args = append(args, "-l", logPath) + args = append(args, "--socket-dir-path", ContainerAttachSocketDir) - pspec := rspec.Process{ - Env: r.conmonEnv, - Args: command, - Cwd: "/", - } - processJSON, err := json.Marshal(pspec) + processFile, err := PrepareProcessExec(c, command, false) if err != nil { return nil, ExecSyncError{ ExitCode: -1, Err: err, } } + defer os.RemoveAll(processFile.Name()) - if err := ioutil.WriteFile(f.Name(), processJSON, 0644); err != nil { - return nil, ExecSyncError{ - ExitCode: -1, - Err: err, - } - } - - args = append(args, "--exec-process-spec", f.Name()) + args = append(args, "--exec-process-spec", processFile.Name()) cmd := exec.Command(r.conmonPath, args...) @@ -541,25 +539,26 @@ func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp }, nil } -// StopContainer stops a container. Timeout is given in seconds. 
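Aside (illustration only) on the new log format handled above: conmon now emits "DATE pipe LogTag REST" lines, where a "P" tag marks a partial line whose trailing newline must be stripped so continuations join cleanly. A self-contained sketch with hypothetical sample lines:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Hypothetical conmon output: timestamp, stream, LogTag, payload.
	// "P" marks a partial line (more content follows), "F" a full line.
	lines := [][]byte{
		[]byte("2017-10-10T10:00:00Z stdout P hello \n"),
		[]byte("2017-10-10T10:00:01Z stdout F world\n"),
	}
	var stdout []byte
	for _, line := range lines {
		parts := bytes.SplitN(line, []byte{' '}, 4)
		if len(parts) < 4 {
			continue // malformed line, skipped just like parseLog does
		}
		content, tag := parts[3], string(parts[2])
		if tag == "P" && len(content) > 0 && content[len(content)-1] == '\n' {
			content = content[:len(content)-1] // join partials without a break
		}
		if string(parts[1]) == "stdout" {
			stdout = append(stdout, content...)
		}
	}
	fmt.Printf("%q\n", stdout) // "hello world\n"
}
```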
-func (r *Runtime) StopContainer(c *Container, timeout int64) error { - c.opLock.Lock() - defer c.opLock.Unlock() +// UpdateContainer updates container resources +func (r *Runtime) UpdateContainer(c *Container, res *rspec.LinuxResources) error { + cmd := exec.Command(r.Path(c), "update", "--resources", "-", c.id) + var stdout bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + jsonResources, err := json.Marshal(res) + if err != nil { + return err + } + cmd.Stdin = bytes.NewReader(jsonResources) - // Check if the process is around before sending a signal - err := unix.Kill(c.state.Pid, 0) - if err == unix.ESRCH { - c.state.Finished = time.Now() - return nil + if err := cmd.Run(); err != nil { + return fmt.Errorf("updating resources for container %q failed: %v %v (%v)", c.id, stderr.String(), stdout.String(), err) } + return nil +} - if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", c.id, c.GetStopSignal()); err != nil { - return fmt.Errorf("failed to stop container %s, %v", c.id, err) - } - if timeout == -1 { - // default 10 seconds delay - timeout = 10 - } +func waitContainerStop(ctx context.Context, c *Container, timeout time.Duration) error { done := make(chan struct{}) // we could potentially re-use "done" channel to exit the loop on timeout // but we use another channel "chControl" so that we won't never incur in the @@ -587,7 +586,10 @@ func (r *Runtime) StopContainer(c *Container, timeout int64) error { select { case <-done: return nil - case <-time.After(time.Duration(timeout) * time.Second): + case <-ctx.Done(): + close(chControl) + return ctx.Err() + case <-time.After(timeout): close(chControl) err := unix.Kill(c.state.Pid, unix.SIGKILL) if err != nil && err != unix.ESRCH { @@ -596,10 +598,39 @@ func (r *Runtime) StopContainer(c *Container, timeout int64) error { } c.state.Finished = time.Now() - return nil } +// StopContainer stops a container. Timeout is given in seconds. +func (r *Runtime) StopContainer(ctx context.Context, c *Container, timeout int64) error { + c.opLock.Lock() + defer c.opLock.Unlock() + + // Check if the process is around before sending a signal + err := unix.Kill(c.state.Pid, 0) + if err == unix.ESRCH { + c.state.Finished = time.Now() + return nil + } + + if timeout > 0 { + if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", c.id, c.GetStopSignal()); err != nil { + return fmt.Errorf("failed to stop container %s, %v", c.id, err) + } + err = waitContainerStop(ctx, c, time.Duration(timeout)*time.Second) + if err == nil { + return nil + } + logrus.Warnf("Stop container %q timed out: %v", c.ID(), err) + } + + if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", "--all", c.id, "KILL"); err != nil { + return fmt.Errorf("failed to stop container %s, %v", c.id, err) + } + + return waitContainerStop(ctx, c, killContainerTimeout) +} + // DeleteContainer deletes a container. func (r *Runtime) DeleteContainer(c *Container) error { c.opLock.Lock() @@ -621,7 +652,7 @@ func (r *Runtime) SetStartFailed(c *Container, err error) { func (r *Runtime) UpdateStatus(c *Container) error { c.opLock.Lock() defer c.opLock.Unlock() - out, err := exec.Command(r.Path(c), "state", c.id).CombinedOutput() + out, err := exec.Command(r.Path(c), "state", c.id).Output() if err != nil { // there are many code paths that could lead to have a bad state in the // underlying runtime. 
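Aside: the rewritten StopContainer above escalates from the container's configured stop signal to `kill --all KILL`, waiting in between via waitContainerStop, which the caller's context can cut short. A condensed, self-contained sketch of that wait pattern (the done/chControl channel pairing mirrors the code above; stopped() stands in for the process liveness check):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitStopped polls until stopped() reports true, the timeout elapses,
// or the caller's context is cancelled.
func waitStopped(ctx context.Context, stopped func() bool, timeout time.Duration) error {
	done := make(chan struct{})
	chControl := make(chan struct{})
	go func() {
		for {
			select {
			case <-chControl:
				return
			default:
				if stopped() {
					close(done)
					return
				}
				time.Sleep(100 * time.Millisecond)
			}
		}
	}()
	select {
	case <-done:
		return nil
	case <-ctx.Done():
		close(chControl)
		return ctx.Err()
	case <-time.After(timeout):
		close(chControl)
		return fmt.Errorf("timed out waiting for stop")
	}
}

func main() {
	deadline := time.Now().Add(300 * time.Millisecond)
	err := waitStopped(context.Background(), func() bool { return time.Now().After(deadline) }, 2*time.Second)
	fmt.Println(err) // <nil>: the fake process "stopped" before the timeout
}
```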
@@ -724,3 +755,27 @@ func (r *Runtime) UnpauseContainer(c *Container) error { _, err := utils.ExecCmd(r.Path(c), "resume", c.id) return err } + +// PrepareProcessExec returns the path of the process.json used in runc exec -p +// caller is responsible to close the returned *os.File if needed. +func PrepareProcessExec(c *Container, cmd []string, tty bool) (*os.File, error) { + f, err := ioutil.TempFile("", "exec-process-") + if err != nil { + return nil, err + } + + pspec := c.Spec().Process + pspec.Args = cmd + if tty { + pspec.Terminal = true + } + processJSON, err := json.Marshal(pspec) + if err != nil { + return nil, err + } + + if err := ioutil.WriteFile(f.Name(), processJSON, 0644); err != nil { + return nil, err + } + return f, nil +} diff --git a/pause/Makefile b/pause/Makefile index da24f7fe..f0951af7 100644 --- a/pause/Makefile +++ b/pause/Makefile @@ -5,9 +5,9 @@ override LIBS += override CFLAGS += -std=c99 -Os -Wall -Wextra -static pause: $(obj) - $(CC) -o $@ $^ $(CFLAGS) $(LIBS) - strip $@ + $(CC) -o ../bin/$@ $^ $(CFLAGS) $(LIBS) + strip ../bin/$@ .PHONY: clean clean: - rm -f $(obj) pause + rm -f $(obj) ../bin/pause diff --git a/pkg/annotations/annotations.go b/pkg/annotations/annotations.go index 151d9390..9b5b1352 100644 --- a/pkg/annotations/annotations.go +++ b/pkg/annotations/annotations.go @@ -22,6 +22,9 @@ const ( // IP is the container ipv4 or ipv6 address IP = "io.kubernetes.cri-o.IP" + // SeccompProfilePath is the node seccomp profile path + SeccompProfilePath = "io.kubernetes.cri-o.SeccompProfilePath" + // Image is the container image ID annotation Image = "io.kubernetes.cri-o.Image" diff --git a/pkg/storage/image.go b/pkg/storage/image.go index 5aca3e8f..5994d952 100644 --- a/pkg/storage/image.go +++ b/pkg/storage/image.go @@ -2,29 +2,43 @@ package storage import ( "errors" - "fmt" "net" - "path/filepath" - "regexp" + "path" "strings" "github.com/containers/image/copy" "github.com/containers/image/docker/reference" "github.com/containers/image/image" + "github.com/containers/image/manifest" "github.com/containers/image/signature" istorage "github.com/containers/image/storage" "github.com/containers/image/transports/alltransports" "github.com/containers/image/types" "github.com/containers/storage" - distreference "github.com/docker/distribution/reference" + digest "github.com/opencontainers/go-digest" +) + +const ( + minimumTruncatedIDLength = 3 +) + +var ( + // ErrCannotParseImageID is returned when we try to ResolveNames for an image ID + ErrCannotParseImageID = errors.New("cannot parse an image ID") + // ErrImageMultiplyTagged is returned when we try to remove an image that still has multiple names + ErrImageMultiplyTagged = errors.New("image still has multiple names applied") ) // ImageResult wraps a subset of information about an image: its ID, its names, // and the size, if known, or nil if it isn't. type ImageResult struct { - ID string - Names []string - Size *uint64 + ID string + Name string + RepoTags []string + RepoDigests []string + Size *uint64 + Digest digest.Digest + ConfigDigest digest.Digest } type indexInfo struct { @@ -40,6 +54,11 @@ type imageService struct { registries []string } +// sizer knows its size. +type sizer interface { + Size() (int64, error) +} + // ImageServer wraps up various CRI-related activities into a reusable // implementation. 
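Aside: a hypothetical call site for the new PrepareProcessExec helper added above, following the pattern ExecSync uses. The helper writes the container's own process spec (with the requested command and optional TTY) to a temporary file that conmon consumes via --exec-process-spec, and the caller removes the file afterwards; `c` and `baseArgs` are assumed to come from surrounding code:

```go
package execsketch

import (
	"os"

	"github.com/kubernetes-incubator/cri-o/oci"
)

// execArgs sketches how a caller could build conmon arguments around the
// temporary process spec file returned by PrepareProcessExec.
func execArgs(c *oci.Container, baseArgs, cmd []string) ([]string, func(), error) {
	processFile, err := oci.PrepareProcessExec(c, cmd, false)
	if err != nil {
		return nil, nil, err
	}
	// The spec file only needs to live until conmon has read it.
	cleanup := func() { os.RemoveAll(processFile.Name()) }
	return append(baseArgs, "--exec-process-spec", processFile.Name()), cleanup, nil
}
```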
type ImageServer interface { @@ -47,8 +66,14 @@ type ImageServer interface { ListImages(systemContext *types.SystemContext, filter string) ([]ImageResult, error) // ImageStatus returns status of an image which matches the filter. ImageStatus(systemContext *types.SystemContext, filter string) (*ImageResult, error) + // PrepareImage returns an Image where the config digest can be grabbed + // for further analysis. Call Close() on the resulting image. + PrepareImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.Image, error) // PullImage imports an image from the specified location. PullImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.ImageReference, error) + // UntagImage removes a name from the specified image, and if it was + // the only name the image had, removes the image. + UntagImage(systemContext *types.SystemContext, imageName string) error // RemoveImage deletes the specified image. RemoveImage(systemContext *types.SystemContext, imageName string) error // GetStore returns the reference to the storage library Store which @@ -78,6 +103,66 @@ func (svc *imageService) getRef(name string) (types.ImageReference, error) { return ref, nil } +func sortNamesByType(names []string) (bestName string, tags, digests []string) { + for _, name := range names { + if len(name) > 72 && name[len(name)-72:len(name)-64] == "@sha256:" { + digests = append(digests, name) + } else { + tags = append(tags, name) + } + } + if len(digests) > 0 { + bestName = digests[0] + } + if len(tags) > 0 { + bestName = tags[0] + } + return bestName, tags, digests +} + +func (svc *imageService) makeRepoDigests(knownRepoDigests, tags []string, imageID string) (imageDigest digest.Digest, repoDigests []string) { + // Look up the image's digest. + img, err := svc.store.Image(imageID) + if err != nil { + return "", knownRepoDigests + } + imageDigest = img.Digest + if imageDigest == "" { + imgDigest, err := svc.store.ImageBigDataDigest(imageID, storage.ImageDigestBigDataKey) + if err != nil || imgDigest == "" { + return "", knownRepoDigests + } + imageDigest = imgDigest + } + // If there are no names to convert to canonical references, we're done. + if len(tags) == 0 { + return imageDigest, knownRepoDigests + } + // We only want to supplement what's already explicitly in the list, so keep track of values + // that we already know. + digestMap := make(map[string]struct{}) + repoDigests = knownRepoDigests + for _, repoDigest := range knownRepoDigests { + digestMap[repoDigest] = struct{}{} + } + // For each tagged name, parse the name, and if we can extract a named reference, convert + // it into a canonical reference using the digest and add it to the list. 
+ for _, tag := range tags { + if ref, err2 := reference.ParseAnyReference(tag); err2 == nil { + if name, ok := ref.(reference.Named); ok { + trimmed := reference.TrimNamed(name) + if imageRef, err3 := reference.WithDigest(trimmed, imageDigest); err3 == nil { + if _, ok := digestMap[imageRef.String()]; !ok { + repoDigests = append(repoDigests, imageRef.String()) + digestMap[imageRef.String()] = struct{}{} + } + } + } + } + } + return imageDigest, repoDigests +} + func (svc *imageService) ListImages(systemContext *types.SystemContext, filter string) ([]ImageResult, error) { results := []ImageResult{} if filter != "" { @@ -86,16 +171,26 @@ func (svc *imageService) ListImages(systemContext *types.SystemContext, filter s return nil, err } if image, err := istorage.Transport.GetStoreImage(svc.store, ref); err == nil { - img, err := ref.NewImage(systemContext) + img, err := ref.NewImageSource(systemContext) if err != nil { return nil, err } size := imageSize(img) + configDigest, err := imageConfigDigest(img, nil) img.Close() + if err != nil { + return nil, err + } + name, tags, digests := sortNamesByType(image.Names) + imageDigest, repoDigests := svc.makeRepoDigests(digests, tags, image.ID) results = append(results, ImageResult{ - ID: image.ID, - Names: image.Names, - Size: size, + ID: image.ID, + Name: name, + RepoTags: tags, + RepoDigests: repoDigests, + Size: size, + Digest: imageDigest, + ConfigDigest: configDigest, }) } } else { @@ -108,16 +203,26 @@ func (svc *imageService) ListImages(systemContext *types.SystemContext, filter s if err != nil { return nil, err } - img, err := ref.NewImage(systemContext) + img, err := ref.NewImageSource(systemContext) if err != nil { return nil, err } size := imageSize(img) + configDigest, err := imageConfigDigest(img, nil) img.Close() + if err != nil { + return nil, err + } + name, tags, digests := sortNamesByType(image.Names) + imageDigest, repoDigests := svc.makeRepoDigests(digests, tags, image.ID) results = append(results, ImageResult{ - ID: image.ID, - Names: image.Names, - Size: size, + ID: image.ID, + Name: name, + RepoTags: tags, + RepoDigests: repoDigests, + Size: size, + Digest: imageDigest, + ConfigDigest: configDigest, }) } } @@ -142,30 +247,56 @@ func (svc *imageService) ImageStatus(systemContext *types.SystemContext, nameOrI return nil, err } - img, err := ref.NewImage(systemContext) + img, err := ref.NewImageSource(systemContext) if err != nil { return nil, err } + defer img.Close() size := imageSize(img) - img.Close() + configDigest, err := imageConfigDigest(img, nil) + if err != nil { + return nil, err + } - return &ImageResult{ - ID: image.ID, - Names: image.Names, - Size: size, - }, nil + name, tags, digests := sortNamesByType(image.Names) + imageDigest, repoDigests := svc.makeRepoDigests(digests, tags, image.ID) + result := ImageResult{ + ID: image.ID, + Name: name, + RepoTags: tags, + RepoDigests: repoDigests, + Size: size, + Digest: imageDigest, + ConfigDigest: configDigest, + } + + return &result, nil } -func imageSize(img types.Image) *uint64 { - if sum, err := img.Size(); err == nil { - usum := uint64(sum) - return &usum +func imageSize(img types.ImageSource) *uint64 { + if s, ok := img.(sizer); ok { + if sum, err := s.Size(); err == nil { + usum := uint64(sum) + return &usum + } } return nil } +func imageConfigDigest(img types.ImageSource, instanceDigest *digest.Digest) (digest.Digest, error) { + manifestBytes, manifestType, err := img.GetManifest(instanceDigest) + if err != nil { + return "", err + } + imgManifest, err := 
manifest.FromBlob(manifestBytes, manifestType)
+	if err != nil {
+		return "", err
+	}
+	return imgManifest.ConfigInfo().Digest, nil
+}
+
 func (svc *imageService) CanPull(imageName string, options *copy.Options) (bool, error) {
-	srcRef, err := svc.prepareImage(imageName, options)
+	srcRef, err := svc.prepareReference(imageName, options)
 	if err != nil {
 		return false, err
 	}
@@ -173,7 +304,11 @@ func (svc *imageService) CanPull(imageName string, options *copy.Options) (bool,
 	if err != nil {
 		return false, err
 	}
-	src, err := image.FromSource(rawSource)
+	sourceCtx := &types.SystemContext{}
+	if options.SourceCtx != nil {
+		sourceCtx = options.SourceCtx
+	}
+	src, err := image.FromSource(sourceCtx, rawSource)
 	if err != nil {
 		rawSource.Close()
 		return false, err
@@ -182,9 +317,9 @@ func (svc *imageService) CanPull(imageName string, options *copy.Options) (bool,
 	return true, nil
 }
 
-// prepareImage creates an image reference from an image string and sets options
+// prepareReference creates an image reference from an image string and sets options
 // for the source context
-func (svc *imageService) prepareImage(imageName string, options *copy.Options) (types.ImageReference, error) {
+func (svc *imageService) prepareReference(imageName string, options *copy.Options) (types.ImageReference, error) {
 	if imageName == "" {
 		return nil, storage.ErrNotAnImage
 	}
@@ -212,6 +347,18 @@ func (svc *imageService) prepareImage(imageName string, options *copy.Options) (
 	return srcRef, nil
 }
 
+func (svc *imageService) PrepareImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.Image, error) {
+	if options == nil {
+		options = &copy.Options{}
+	}
+
+	srcRef, err := svc.prepareReference(imageName, options)
+	if err != nil {
+		return nil, err
+	}
+	return srcRef.NewImage(systemContext)
+}
+
 func (svc *imageService) PullImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.ImageReference, error) {
 	policy, err := signature.DefaultPolicy(systemContext)
 	if err != nil {
@@ -225,7 +372,7 @@ func (svc *imageService) PullImage(systemContext *types.SystemContext, imageName
 		options = &copy.Options{}
 	}
 
-	srcRef, err := svc.prepareImage(imageName, options)
+	srcRef, err := svc.prepareReference(imageName, options)
 	if err != nil {
 		return nil, err
 	}
@@ -251,6 +398,57 @@ func (svc *imageService) PullImage(systemContext *types.SystemContext, imageName
 	return destRef, nil
 }
 
+func (svc *imageService) UntagImage(systemContext *types.SystemContext, nameOrID string) error {
+	ref, err := alltransports.ParseImageName(nameOrID)
+	if err != nil {
+		ref2, err2 := istorage.Transport.ParseStoreReference(svc.store, "@"+nameOrID)
+		if err2 != nil {
+			ref3, err3 := istorage.Transport.ParseStoreReference(svc.store, nameOrID)
+			if err3 != nil {
+				return err
+			}
+			ref2 = ref3
+		}
+		ref = ref2
+	}
+
+	img, err := istorage.Transport.GetStoreImage(svc.store, ref)
+	if err != nil {
+		return err
+	}
+
+	if !strings.HasPrefix(img.ID, nameOrID) {
+		namedRef, err := svc.prepareReference(nameOrID, &copy.Options{})
+		if err != nil {
+			return err
+		}
+
+		name := nameOrID
+		if namedRef.DockerReference() != nil {
+			name = namedRef.DockerReference().Name()
+			if tagged, ok := namedRef.DockerReference().(reference.NamedTagged); ok {
+				name = name + ":" + tagged.Tag()
+			}
+			if canonical, ok := namedRef.DockerReference().(reference.Canonical); ok {
+				name = name + "@" + canonical.Digest().String()
+			}
+		}
+
+		prunedNames := make([]string, 0, len(img.Names))
+		for _, imgName := range img.Names {
+			if imgName != name 
&& imgName != nameOrID { + prunedNames = append(prunedNames, imgName) + } + } + + if len(prunedNames) > 0 { + return svc.store.SetNames(img.ID, prunedNames) + } + } + + return ref.DeleteImage(systemContext) +} + func (svc *imageService) RemoveImage(systemContext *types.SystemContext, nameOrID string) error { ref, err := alltransports.ParseImageName(nameOrID) if err != nil { @@ -307,113 +505,35 @@ func (svc *imageService) isSecureIndex(indexName string) bool { return true } -func isValidHostname(hostname string) bool { - return hostname != "" && !strings.Contains(hostname, "/") && - (strings.Contains(hostname, ".") || - strings.Contains(hostname, ":") || hostname == "localhost") -} - -func isReferenceFullyQualified(reposName reference.Named) bool { - indexName, _, _ := splitReposName(reposName) - return indexName != "" -} - -const ( - // defaultHostname is the default built-in hostname - defaultHostname = "docker.io" - // legacyDefaultHostname is automatically converted to DefaultHostname - legacyDefaultHostname = "index.docker.io" - // defaultRepoPrefix is the prefix used for default repositories in default host - defaultRepoPrefix = "library/" -) - -// splitReposName breaks a reposName into an index name and remote name -func splitReposName(reposName reference.Named) (indexName string, remoteName reference.Named, err error) { - var remoteNameStr string - indexName, remoteNameStr = distreference.SplitHostname(reposName) - if !isValidHostname(indexName) { - // This is a Docker Index repos (ex: samalba/hipache or ubuntu) - // 'docker.io' - indexName = "" - remoteName = reposName - } else { - remoteName, err = withName(remoteNameStr) - } - return -} - -func validateName(name string) error { - if err := validateID(strings.TrimPrefix(name, defaultHostname+"/")); err == nil { - return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) - } - return nil -} - -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) - -// validateID checks whether an ID string is a valid image ID. -func validateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} - -// withName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func withName(name string) (reference.Named, error) { - name, err := normalize(name) - if err != nil { - return nil, err - } - if err := validateName(name); err != nil { - return nil, err - } - r, err := distreference.WithName(name) - return r, err -} - -// splitHostname splits a repository name to hostname and remotename string. -// If no valid hostname is found, empty string will be returned as a resulting -// hostname. Repository name needs to be already validated before. 
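The pruning branch in `UntagImage` above decides between two outcomes: if other stored names survive the filter, `SetNames` rewrites the image's name list and the image itself stays; only when nothing is left does control fall through to `DeleteImage`. A minimal standalone sketch of that decision (the `pruneName` helper is hypothetical, not part of the diff):

```go
package main

import "fmt"

// pruneName mimics the UntagImage logic above: drop every stored name that
// matches either the resolved canonical name or the raw input, then report
// whether the image keeps other tags (SetNames) or should be deleted.
func pruneName(storedNames []string, canonical, raw string) (kept []string, deleteImage bool) {
	for _, n := range storedNames {
		if n != canonical && n != raw {
			kept = append(kept, n)
		}
	}
	return kept, len(kept) == 0
}

func main() {
	names := []string{"docker.io/library/nginx:latest", "docker.io/library/nginx:1.13"}
	kept, del := pruneName(names, "docker.io/library/nginx:latest", "nginx:latest")
	fmt.Println(kept, del) // [docker.io/library/nginx:1.13] false
}
```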
-func splitHostname(name string) (hostname, remoteName string) { +func splitDockerDomain(name string) (domain, remainder string) { i := strings.IndexRune(name, '/') if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - hostname, remoteName = "", name + domain, remainder = "", name } else { - hostname, remoteName = name[:i], name[i+1:] - } - if hostname == legacyDefaultHostname { - hostname = defaultHostname - } - if hostname == defaultHostname && !strings.ContainsRune(remoteName, '/') { - remoteName = defaultRepoPrefix + remoteName + domain, remainder = name[:i], name[i+1:] } return } -// normalize returns a repository name in its normalized form, meaning it -// will contain library/ prefix for official images. -func normalize(name string) (string, error) { - host, remoteName := splitHostname(name) - if strings.ToLower(remoteName) != remoteName { - return "", errors.New("invalid reference format: repository name must be lowercase") - } - if host == defaultHostname { - if strings.HasPrefix(remoteName, defaultRepoPrefix) { - remoteName = strings.TrimPrefix(remoteName, defaultRepoPrefix) - } - return host + "/" + remoteName, nil - } - return name, nil -} - func (svc *imageService) ResolveNames(imageName string) ([]string, error) { - r, err := reference.ParseNormalizedNamed(imageName) + // _Maybe_ it's a truncated image ID. Don't prepend a registry name, then. + if len(imageName) >= minimumTruncatedIDLength && svc.store != nil { + if img, err := svc.store.Image(imageName); err == nil && img != nil && strings.HasPrefix(img.ID, imageName) { + // It's a truncated version of the ID of an image that's present in local storage; + // we need to expand it. + return []string{img.ID}, nil + } + } + // This to prevent any image ID to go through this routine + _, err := reference.ParseNormalizedNamed(imageName) if err != nil { + if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") { + return nil, ErrCannotParseImageID + } return nil, err } - if isReferenceFullyQualified(r) { + domain, remainder := splitDockerDomain(imageName) + if domain != "" { // this means the image is already fully qualified return []string{imageName}, nil } @@ -425,10 +545,13 @@ func (svc *imageService) ResolveNames(imageName string) ([]string, error) { // this means we got an image in the form of "busybox" // we need to use additional registries... // normalize the unqualified image to be domain/repo/image... - _, rest := splitDomain(r.Name()) images := []string{} for _, r := range svc.registries { - images = append(images, filepath.Join(r, rest)) + rem := remainder + if r == "docker.io" && !strings.ContainsRune(remainder, '/') { + rem = "library/" + rem + } + images = append(images, path.Join(r, rem)) } return images, nil } diff --git a/pkg/storage/image_regexp.go b/pkg/storage/image_regexp.go deleted file mode 100644 index 96de6488..00000000 --- a/pkg/storage/image_regexp.go +++ /dev/null @@ -1,125 +0,0 @@ -package storage - -// This is a fork of docker/distribution code to be used when manipulating image -// references. -// DO NOT EDIT THIS FILE. - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. 
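`splitDockerDomain` only treats the leading path component as a registry domain when it looks like a host, i.e. it contains `.` or `:`, or is exactly `localhost`. A runnable restatement of the same logic with a few illustrative inputs:

```go
package main

import (
	"fmt"
	"strings"
)

// splitDockerDomain mirrors the function added above: the part before the
// first '/' is a domain only if it contains '.' or ':' or is "localhost".
func splitDockerDomain(name string) (domain, remainder string) {
	i := strings.IndexRune(name, '/')
	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
		return "", name
	}
	return name[:i], name[i+1:]
}

func main() {
	for _, name := range []string{
		"nginx:latest",                // no domain: unqualified
		"openshift3/ose-deployer",     // "openshift3" is a repo, not a domain
		"docker.io/nginx",             // domain "docker.io"
		"localhost/img",               // "localhost" counts as a domain
		"myregistry.com:5000/foo/bar", // domain with port
	} {
		d, r := splitDockerDomain(name)
		fmt.Printf("%-30s -> domain=%q remainder=%q\n", name, d, r)
	}
}
```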
- separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by domainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // domainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - domainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(domainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(domainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. 
-func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/pkg/storage/image_test.go b/pkg/storage/image_test.go new file mode 100644 index 00000000..82807206 --- /dev/null +++ b/pkg/storage/image_test.go @@ -0,0 +1,84 @@ +package storage + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestResolveNames(t *testing.T) { + cases := []struct { + name string + additionalRegistries []string + imageName string + expected []string + err bool + errContains string + }{ + { + name: "test unqualified images get correctly qualified in order and correct tag", + additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"}, + imageName: "openshift3/ose-deployer:sometag", + expected: []string{"testregistry.com/openshift3/ose-deployer:sometag", "registry.access.redhat.com/openshift3/ose-deployer:sometag", "docker.io/openshift3/ose-deployer:sometag"}, + err: false, + }, + { + name: "test unqualified images get correctly qualified in order and correct digest", + additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"}, + imageName: "openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3", + expected: []string{"testregistry.com/openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3", "registry.access.redhat.com/openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3", "docker.io/openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3"}, + err: false, + }, + { + name: "test unqualified images get correctly qualified in order", + additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"}, + imageName: "openshift3/ose-deployer:latest", + expected: []string{"testregistry.com/openshift3/ose-deployer:latest", "registry.access.redhat.com/openshift3/ose-deployer:latest", "docker.io/openshift3/ose-deployer:latest"}, + err: false, + }, + { + name: "test unqualified images get correctly qualified from official library", + additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"}, + imageName: "nginx:latest", + expected: []string{"testregistry.com/nginx:latest", "registry.access.redhat.com/nginx:latest", "docker.io/library/nginx:latest"}, + err: false, + }, + { + name: "test qualified images returns just qualified", + additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"}, + imageName: "mypersonalregistry.com/nginx:latest", + expected: []string{"mypersonalregistry.com/nginx:latest"}, + err: false, + }, + { + name: "test we don't have names w/o registries", + imageName: "openshift3/ose-deployer:latest", + err: true, + }, + { + name: "test we cannot resolve names from an image ID", + imageName: "6ad733544a6317992a6fac4eb19fe1df577d4dec7529efec28a5bd0edad0fd30", + err: true, + errContains: "cannot parse an image ID", + }, + } + for _, c := range cases { + svc := &imageService{ + 
registries: c.additionalRegistries,
+		}
+		names, err := svc.ResolveNames(c.imageName)
+		if !c.err {
+			require.NoError(t, err, c.name)
+			if !reflect.DeepEqual(names, c.expected) {
+				t.Fatalf("Expected: %v, Got: %v: %q", c.expected, names, c.name)
+			}
+		} else {
+			require.Error(t, err, c.name)
+			if c.errContains != "" {
+				assert.Contains(t, err.Error(), c.errContains)
+			}
+		}
+	}
+}
diff --git a/server/apparmor/apparmor_common.go b/server/apparmor/apparmor_common.go
index 6366a66e..76c640b8 100644
--- a/server/apparmor/apparmor_common.go
+++ b/server/apparmor/apparmor_common.go
@@ -3,10 +3,6 @@ package apparmor
 const (
 	// DefaultApparmorProfile is the name of default apparmor profile name.
 	DefaultApparmorProfile = "crio-default"
-
-	// ContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container profile.
-	ContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/"
-
 	// ProfileRuntimeDefault is the profile specifying the runtime default.
 	ProfileRuntimeDefault = "runtime/default"
 	// ProfileNamePrefix is the prefix for specifying profiles loaded on the node.
diff --git a/server/apparmor/apparmor_supported.go b/server/apparmor/apparmor_supported.go
index d765c9de..49360470 100644
--- a/server/apparmor/apparmor_supported.go
+++ b/server/apparmor/apparmor_supported.go
@@ -11,7 +11,7 @@ import (
 	"path"
 	"strings"

-	"github.com/docker/docker/utils/templates"
+	"github.com/docker/docker/pkg/templates"
 	"github.com/opencontainers/runc/libcontainer/apparmor"
 )

@@ -34,7 +34,7 @@ type profileData struct {
 // EnsureDefaultApparmorProfile loads default apparmor profile, if it is not loaded.
 func EnsureDefaultApparmorProfile() error {
-	if apparmor.IsEnabled() {
+	if IsEnabled() {
 		loaded, err := IsLoaded(DefaultApparmorProfile)
 		if err != nil {
 			return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", DefaultApparmorProfile, err)
@@ -59,12 +59,6 @@ func IsEnabled() bool {
 	return apparmor.IsEnabled()
 }

-// GetProfileNameFromPodAnnotations gets the name of the profile to use with container from
-// pod annotations
-func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string {
-	return annotations[ContainerAnnotationKeyPrefix+containerName]
-}
-
 // InstallDefault generates a default profile in a temp directory determined by
 // os.TempDir(), then loads the profile into the kernel using 'apparmor_parser'.
 func InstallDefault(name string) error {
diff --git a/server/apparmor/apparmor_unsupported.go b/server/apparmor/apparmor_unsupported.go
index fbd1d87a..20cd15d2 100644
--- a/server/apparmor/apparmor_unsupported.go
+++ b/server/apparmor/apparmor_unsupported.go
@@ -11,8 +11,3 @@ func IsEnabled() bool {
 func EnsureDefaultApparmorProfile() error {
 	return nil
 }
-
-// GetProfileNameFromPodAnnotations dose nothing, when build without apparmor build tag.
-func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string {
-	return ""
-}
diff --git a/server/config.go b/server/config.go
index 6c2d26cd..541bfdc0 100644
--- a/server/config.go
+++ b/server/config.go
@@ -5,7 +5,7 @@ import (
 	"io/ioutil"

 	"github.com/BurntSushi/toml"
-	"github.com/kubernetes-incubator/cri-o/libkpod"
+	"github.com/kubernetes-incubator/cri-o/lib"
 )

 //CrioConfigPath is the default location for the conf file
 const CrioConfigPath = "/etc/crio/crio.conf"

 // Config represents the entire set of configuration values that can be set for
 // the server.
This is intended to be loaded from a toml-encoded config file. type Config struct { - libkpod.Config + lib.Config APIConfig } @@ -37,11 +37,11 @@ type APIConfig struct { // conversions. type tomlConfig struct { Crio struct { - libkpod.RootConfig - API struct{ APIConfig } `toml:"api"` - Runtime struct{ libkpod.RuntimeConfig } `toml:"runtime"` - Image struct{ libkpod.ImageConfig } `toml:"image"` - Network struct{ libkpod.NetworkConfig } `toml:"network"` + lib.RootConfig + API struct{ APIConfig } `toml:"api"` + Runtime struct{ lib.RuntimeConfig } `toml:"runtime"` + Image struct{ lib.ImageConfig } `toml:"image"` + Network struct{ lib.NetworkConfig } `toml:"network"` } `toml:"crio"` } @@ -102,9 +102,9 @@ func (c *Config) ToFile(path string) error { // DefaultConfig returns the default configuration for crio. func DefaultConfig() *Config { return &Config{ - Config: *libkpod.DefaultConfig(), + Config: *lib.DefaultConfig(), APIConfig: APIConfig{ - Listen: "/var/run/crio.sock", + Listen: "/var/run/crio/crio.sock", StreamAddress: "", StreamPort: "10010", }, diff --git a/server/config_test.go b/server/config_test.go new file mode 100644 index 00000000..9d8ddf04 --- /dev/null +++ b/server/config_test.go @@ -0,0 +1,89 @@ +package server + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/kubernetes-incubator/cri-o/lib" +) + +const fixturePath = "fixtures/crio.conf" + +func must(t *testing.T, err error) { + if err != nil { + t.Error(err) + } +} + +func assertAllFieldsEquality(t *testing.T, c Config) { + testCases := []struct { + fieldValue, expected interface{} + }{ + {c.RootConfig.Root, "/var/lib/containers/storage"}, + {c.RootConfig.RunRoot, "/var/run/containers/storage"}, + {c.RootConfig.Storage, "overlay"}, + {c.RootConfig.StorageOptions[0], "overlay.override_kernel_check=1"}, + + {c.APIConfig.Listen, "/var/run/crio.sock"}, + {c.APIConfig.StreamPort, "10010"}, + {c.APIConfig.StreamAddress, "localhost"}, + + {c.RuntimeConfig.Runtime, "/usr/local/bin/runc"}, + {c.RuntimeConfig.RuntimeUntrustedWorkload, "untrusted"}, + {c.RuntimeConfig.DefaultWorkloadTrust, "trusted"}, + {c.RuntimeConfig.Conmon, "/usr/local/libexec/crio/conmon"}, + {c.RuntimeConfig.ConmonEnv[0], "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + {c.RuntimeConfig.SELinux, true}, + {c.RuntimeConfig.SeccompProfile, "/etc/crio/seccomp.json"}, + {c.RuntimeConfig.ApparmorProfile, "crio-default"}, + {c.RuntimeConfig.CgroupManager, "cgroupfs"}, + {c.RuntimeConfig.PidsLimit, int64(1024)}, + + {c.ImageConfig.DefaultTransport, "docker://"}, + {c.ImageConfig.PauseImage, "kubernetes/pause"}, + {c.ImageConfig.PauseCommand, "/pause"}, + {c.ImageConfig.SignaturePolicyPath, "/tmp"}, + {c.ImageConfig.ImageVolumes, lib.ImageVolumesType("mkdir")}, + {c.ImageConfig.InsecureRegistries[0], "insecure-registry:1234"}, + {c.ImageConfig.Registries[0], "registry:4321"}, + + {c.NetworkConfig.NetworkDir, "/etc/cni/net.d/"}, + {c.NetworkConfig.PluginDir, "/opt/cni/bin/"}, + } + for _, tc := range testCases { + if tc.fieldValue != tc.expected { + t.Errorf(`Expecting: "%s", got: "%s"`, tc.expected, tc.fieldValue) + } + } +} + +func TestUpdateFromFile(t *testing.T) { + c := Config{} + + must(t, c.UpdateFromFile(fixturePath)) + + assertAllFieldsEquality(t, c) +} + +func TestToFile(t *testing.T) { + configFromFixture := Config{} + + must(t, configFromFixture.UpdateFromFile(fixturePath)) + + f, err := ioutil.TempFile("", "crio.conf") + if err != nil { + t.Error(err) + } + defer os.Remove(f.Name()) + + must(t, 
configFromFixture.ToFile(f.Name())) + + writtenConfig := Config{} + err = writtenConfig.UpdateFromFile(f.Name()) + if err != nil { + t.Fatal(err) + } + + assertAllFieldsEquality(t, writtenConfig) +} diff --git a/server/container_attach.go b/server/container_attach.go index 2d2fe203..ec9bedab 100644 --- a/server/container_attach.go +++ b/server/container_attach.go @@ -6,6 +6,7 @@ import ( "net" "os" "path/filepath" + "time" "github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/utils" @@ -25,10 +26,15 @@ const ( ) // Attach prepares a streaming endpoint to attach to a running container. -func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (*pb.AttachResponse, error) { +func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (resp *pb.AttachResponse, err error) { + const operation = "attach" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("AttachRequest %+v", req) - resp, err := s.GetAttach(req) + resp, err = s.GetAttach(req) if err != nil { return nil, fmt.Errorf("unable to prepare attach endpoint") } @@ -67,7 +73,7 @@ func (ss streamService) Attach(containerID string, inputStream io.Reader, output } }) - attachSocketPath := filepath.Join("/var/run/crio", c.ID(), "attach") + attachSocketPath := filepath.Join(oci.ContainerAttachSocketDir, c.ID(), "attach") conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: attachSocketPath, Net: "unixpacket"}) if err != nil { return fmt.Errorf("failed to connect to container %s attach socket: %v", c.ID(), err) diff --git a/server/container_create.go b/server/container_create.go index 6d93408c..a4652cf3 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -5,18 +5,20 @@ import ( "errors" "fmt" "io" + "io/ioutil" "os" "path/filepath" "regexp" + "sort" "strconv" "strings" "time" - "github.com/docker/distribution/reference" + dockermounts "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/symlink" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/libkpod/sandbox" + "github.com/kubernetes-incubator/cri-o/lib" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" "github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/pkg/annotations" "github.com/kubernetes-incubator/cri-o/pkg/storage" @@ -46,41 +48,96 @@ const ( defaultSystemdParent = "system.slice" ) -func addOCIBindMounts(mountLabel string, containerConfig *pb.ContainerConfig, specgen *generate.Generator) ([]oci.ContainerVolume, error) { +type orderedMounts []rspec.Mount + +// Len returns the number of mounts. Used in sorting. +func (m orderedMounts) Len() int { + return len(m) +} + +// Less returns true if the number of parts (a/b/c would be 3 parts) in the +// mount indexed by parameter 1 is less than that of the mount indexed by +// parameter 2. Used in sorting. +func (m orderedMounts) Less(i, j int) bool { + return m.parts(i) < m.parts(j) +} + +// Swap swaps two items in an array of mounts. Used in sorting +func (m orderedMounts) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +// parts returns the number of parts in the destination of a mount. Used in sorting. 
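`orderedMounts` exists so that `sort.Sort` can order bind mounts by the depth of their destination paths, which guarantees a parent directory is mounted before anything nested beneath it. A reduced sketch with a bare `mount` struct standing in for `rspec.Mount`:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

type mount struct{ Destination string }

type orderedMounts []mount

func (m orderedMounts) Len() int      { return len(m) }
func (m orderedMounts) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
func (m orderedMounts) Less(i, j int) bool {
	// Fewer path separators means closer to the root, so mounted first.
	return m.parts(i) < m.parts(j)
}
func (m orderedMounts) parts(i int) int {
	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
}

func main() {
	mounts := orderedMounts{
		{"/var/lib/data/nested"},
		{"/etc/resolv.conf"},
		{"/var/lib/data"},
	}
	sort.Sort(mounts)
	for _, m := range mounts {
		// Prints /etc/resolv.conf, then /var/lib/data, then /var/lib/data/nested.
		fmt.Println(m.Destination)
	}
}
```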
+func (m orderedMounts) parts(i int) int { + return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) +} + +func addOCIBindMounts(mountLabel string, containerConfig *pb.ContainerConfig, specgen *generate.Generator) ([]oci.ContainerVolume, []rspec.Mount, error) { volumes := []oci.ContainerVolume{} + ociMounts := []rspec.Mount{} mounts := containerConfig.GetMounts() for _, mount := range mounts { dest := mount.ContainerPath if dest == "" { - return nil, fmt.Errorf("Mount.ContainerPath is empty") + return nil, nil, fmt.Errorf("Mount.ContainerPath is empty") } src := mount.HostPath if src == "" { - return nil, fmt.Errorf("Mount.HostPath is empty") + return nil, nil, fmt.Errorf("Mount.HostPath is empty") } if _, err := os.Stat(src); err != nil && os.IsNotExist(err) { if err1 := os.MkdirAll(src, 0644); err1 != nil { - return nil, fmt.Errorf("Failed to mkdir %s: %s", src, err) + return nil, nil, fmt.Errorf("Failed to mkdir %s: %s", src, err) } } src, err := resolveSymbolicLink(src) if err != nil { - return nil, fmt.Errorf("failed to resolve symlink %q: %v", src, err) + return nil, nil, fmt.Errorf("failed to resolve symlink %q: %v", src, err) } options := []string{"rw"} if mount.Readonly { options = []string{"ro"} } - options = append(options, []string{"rbind", "rprivate"}...) + options = append(options, "rbind") + + // mount propagation + mountInfos, err := dockermounts.GetMounts() + if err != nil { + return nil, nil, err + } + switch mount.GetPropagation() { + case pb.MountPropagation_PROPAGATION_PRIVATE: + options = append(options, "rprivate") + // Since default root propagation in runc is rprivate ignore + // setting the root propagation + case pb.MountPropagation_PROPAGATION_BIDIRECTIONAL: + if err := ensureShared(src, mountInfos); err != nil { + return nil, nil, err + } + options = append(options, "rshared") + specgen.SetLinuxRootPropagation("rshared") + case pb.MountPropagation_PROPAGATION_HOST_TO_CONTAINER: + if err := ensureSharedOrSlave(src, mountInfos); err != nil { + return nil, nil, err + } + options = append(options, "rslave") + if specgen.Spec().Linux.RootfsPropagation != "rshared" && + specgen.Spec().Linux.RootfsPropagation != "rslave" { + specgen.SetLinuxRootPropagation("rslave") + } + default: + logrus.Warnf("Unknown propagation mode for hostPath %q", mount.HostPath) + options = append(options, "rprivate") + } if mount.SelinuxRelabel { // Need a way in kubernetes to determine if the volume is shared or private if err := label.Relabel(src, mountLabel, true); err != nil && err != unix.ENOTSUP { - return nil, fmt.Errorf("relabel failed %s: %v", src, err) + return nil, nil, fmt.Errorf("relabel failed %s: %v", src, err) } } @@ -90,45 +147,123 @@ func addOCIBindMounts(mountLabel string, containerConfig *pb.ContainerConfig, sp Readonly: mount.Readonly, }) - specgen.AddBindMount(src, dest, options) + ociMounts = append(ociMounts, rspec.Mount{ + Source: src, + Destination: dest, + Options: options, + }) } - return volumes, nil + return volumes, ociMounts, nil } -func addImageVolumes(rootfs string, s *Server, containerInfo *storage.ContainerInfo, specgen *generate.Generator, mountLabel string) error { +// Ensure mount point on which path is mounted, is shared. +func ensureShared(path string, mountInfos []*dockermounts.Info) error { + sourceMount, optionalOpts, err := getSourceMount(path, mountInfos) + if err != nil { + return err + } + + // Make sure source mount point is shared. 
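The propagation switch in `addOCIBindMounts` maps the three CRI `MountPropagation` modes onto runc bind options, falling back to `rprivate` for anything unrecognized. A compact restatement of just that mapping (plain strings stand in for the `pb.MountPropagation` enum values):

```go
package main

import "fmt"

// propagationOption mirrors the switch above: CRI propagation modes map
// onto runc bind-mount options; unknown modes fall back to rprivate.
func propagationOption(mode string) string {
	switch mode {
	case "PROPAGATION_BIDIRECTIONAL":
		return "rshared" // also forces rootfs propagation to rshared
	case "PROPAGATION_HOST_TO_CONTAINER":
		return "rslave" // rootfs propagation upgraded to rslave if still rprivate
	case "PROPAGATION_PRIVATE":
		return "rprivate"
	default:
		return "rprivate"
	}
}

func main() {
	for _, m := range []string{
		"PROPAGATION_PRIVATE",
		"PROPAGATION_BIDIRECTIONAL",
		"PROPAGATION_HOST_TO_CONTAINER",
		"PROPAGATION_UNKNOWN",
	} {
		fmt.Println(m, "->", propagationOption(m))
	}
}
```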
+ optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + return nil + } + } + + return fmt.Errorf("path %q is mounted on %q but it is not a shared mount", path, sourceMount) +} + +// Ensure mount point on which path is mounted, is either shared or slave. +func ensureSharedOrSlave(path string, mountInfos []*dockermounts.Info) error { + sourceMount, optionalOpts, err := getSourceMount(path, mountInfos) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + return nil + } else if strings.HasPrefix(opt, "master:") { + return nil + } + } + return fmt.Errorf("path %q is mounted on %q but it is not a shared or slave mount", path, sourceMount) +} + +func getMountInfo(mountInfos []*dockermounts.Info, dir string) *dockermounts.Info { + for _, m := range mountInfos { + if m.Mountpoint == dir { + return m + } + } + return nil +} + +func getSourceMount(source string, mountInfos []*dockermounts.Info) (string, string, error) { + mountinfo := getMountInfo(mountInfos, source) + if mountinfo != nil { + return source, mountinfo.Optional, nil + } + + path := source + for { + path = filepath.Dir(path) + mountinfo = getMountInfo(mountInfos, path) + if mountinfo != nil { + return path, mountinfo.Optional, nil + } + + if path == "/" { + break + } + } + + // If we are here, we did not find parent mount. Something is wrong. + return "", "", fmt.Errorf("Could not find source mount of %s", source) +} + +func addImageVolumes(rootfs string, s *Server, containerInfo *storage.ContainerInfo, specgen *generate.Generator, mountLabel string) ([]rspec.Mount, error) { + mounts := []rspec.Mount{} for dest := range containerInfo.Config.Config.Volumes { fp, err := symlink.FollowSymlinkInScope(filepath.Join(rootfs, dest), rootfs) if err != nil { - return err + return nil, err } switch s.config.ImageVolumes { - case libkpod.ImageVolumesMkdir: + case lib.ImageVolumesMkdir: if err1 := os.MkdirAll(fp, 0644); err1 != nil { - return err1 + return nil, err1 } - case libkpod.ImageVolumesBind: + case lib.ImageVolumesBind: volumeDirName := stringid.GenerateNonCryptoID() src := filepath.Join(containerInfo.RunDir, "mounts", volumeDirName) if err1 := os.MkdirAll(src, 0644); err1 != nil { - return err1 + return nil, err1 } // Label the source with the sandbox selinux mount label if mountLabel != "" { if err1 := label.Relabel(src, mountLabel, true); err1 != nil && err1 != unix.ENOTSUP { - return fmt.Errorf("relabel failed %s: %v", src, err1) + return nil, fmt.Errorf("relabel failed %s: %v", src, err1) } } logrus.Debugf("Adding bind mounted volume: %s to %s", src, dest) - specgen.AddBindMount(src, dest, []string{"rw"}) - case libkpod.ImageVolumesIgnore: + mounts = append(mounts, rspec.Mount{ + Source: src, + Destination: dest, + Options: []string{"rw"}, + }) + + case lib.ImageVolumesIgnore: logrus.Debugf("Ignoring volume %v", dest) default: logrus.Fatalf("Unrecognized image volumes setting") } } - return nil + return mounts, nil } // resolveSymbolicLink resolves a possbile symlink path. 
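`getSourceMount` walks from the bind source up toward `/` until it reaches a path that is itself a mount point, then hands back that mount's optional fields for the shared/slave checks. The sketch below keeps the same walk but swaps docker's mountinfo slice for a plain map, purely for illustration:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// getSourceMount walks up the directory tree until it finds an entry in the
// mount table, mirroring the loop above. mountPoints maps a mount point to
// its optional fields (illustrative stand-in for dockermounts.Info).
func getSourceMount(source string, mountPoints map[string]string) (string, string, error) {
	path := source
	for {
		if opts, ok := mountPoints[path]; ok {
			return path, opts, nil
		}
		if path == "/" {
			break
		}
		path = filepath.Dir(path)
	}
	return "", "", fmt.Errorf("could not find source mount of %s", source)
}

func main() {
	table := map[string]string{"/": "shared:1", "/mnt/nfs": "master:3"}
	mnt, opts, _ := getSourceMount("/mnt/nfs/exports/data", table)
	fmt.Println(mnt, opts) // /mnt/nfs master:3
}
```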
If the path is a symlink, returns resolved @@ -146,7 +281,7 @@ func resolveSymbolicLink(path string) (string, error) { func addDevices(sb *sandbox.Sandbox, containerConfig *pb.ContainerConfig, specgen *generate.Generator) error { sp := specgen.Spec() - if containerConfig.GetLinux().GetSecurityContext().Privileged { + if containerConfig.GetLinux().GetSecurityContext().GetPrivileged() { hostDevices, err := devices.HostDevices() if err != nil { return err @@ -288,18 +423,21 @@ func buildOCIProcessArgs(containerKubeConfig *pb.ContainerConfig, imageOCIConfig } // addOCIHook look for hooks programs installed in hooksDirPath and add them to spec -func addOCIHook(specgen *generate.Generator, hook libkpod.HookParams) error { +func addOCIHook(specgen *generate.Generator, hook lib.HookParams) error { logrus.Debugf("AddOCIHook", hook) for _, stage := range hook.Stage { + h := rspec.Hook{ + Path: hook.Hook, + Args: append([]string{hook.Hook}, hook.Arguments...), + Env: []string{fmt.Sprintf("stage=%s", stage)}, + } switch stage { case "prestart": - specgen.AddPreStartHook(hook.Hook, []string{hook.Hook, "prestart"}) - + specgen.AddPreStartHook(h) case "poststart": - specgen.AddPostStartHook(hook.Hook, []string{hook.Hook, "poststart"}) - + specgen.AddPostStartHook(h) case "poststop": - specgen.AddPostStopHook(hook.Hook, []string{hook.Hook, "poststop"}) + specgen.AddPostStopHook(h) } } return nil @@ -352,6 +490,110 @@ func setupContainerUser(specgen *generate.Generator, rootfs string, sc *pb.Linux return nil } +// setupCapabilities sets process.capabilities in the OCI runtime config. +func setupCapabilities(specgen *generate.Generator, capabilities *pb.Capability) error { + if capabilities == nil { + return nil + } + + toCAPPrefixed := func(cap string) string { + if !strings.HasPrefix(strings.ToLower(cap), "cap_") { + return "CAP_" + strings.ToUpper(cap) + } + return cap + } + + // Add/drop all capabilities if "all" is specified, so that + // following individual add/drop could still work. E.g. + // AddCapabilities: []string{"ALL"}, DropCapabilities: []string{"CHOWN"} + // will be all capabilities without `CAP_CHOWN`. 
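Two details carry the capability logic introduced here: CRI capability names arrive without the `CAP_` prefix, and a literal `"ALL"` has to be expanded before individual adds and drops are applied, so that `add: ["ALL"], drop: ["CHOWN"]` yields everything except `CAP_CHOWN`. A minimal sketch of that normalization and expansion (`allCaps` is a stand-in for `getOCICapabilitiesList()`, and the single set is a simplification of the five per-process capability sets the generator actually tracks):

```go
package main

import (
	"fmt"
	"strings"
)

// allCaps stands in for getOCICapabilitiesList(); illustrative subset only.
var allCaps = []string{"CAP_CHOWN", "CAP_KILL", "CAP_NET_RAW"}

// toCAPPrefixed mirrors the helper above: "chown" -> "CAP_CHOWN",
// while an already-prefixed name is left alone.
func toCAPPrefixed(c string) string {
	if !strings.HasPrefix(strings.ToLower(c), "cap_") {
		return "CAP_" + strings.ToUpper(c)
	}
	return c
}

// effectiveCaps expands "ALL" first within each list, then applies the
// individual adds and drops, with drops winning last.
func effectiveCaps(add, drop []string) []string {
	set := map[string]bool{}
	apply := func(names []string, val bool) {
		for _, c := range names {
			if strings.ToUpper(c) == "ALL" {
				for _, all := range allCaps {
					set[all] = val
				}
				continue
			}
			set[toCAPPrefixed(c)] = val
		}
	}
	apply(add, true)
	apply(drop, false)
	out := []string{}
	for _, c := range allCaps {
		if set[c] {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	fmt.Println(effectiveCaps([]string{"ALL"}, []string{"CHOWN"}))
	// [CAP_KILL CAP_NET_RAW]
}
```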
+ // see https://github.com/kubernetes/kubernetes/issues/51980 + if inStringSlice(capabilities.GetAddCapabilities(), "ALL") { + for _, c := range getOCICapabilitiesList() { + if err := specgen.AddProcessCapabilityAmbient(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityBounding(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityEffective(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityInheritable(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityPermitted(c); err != nil { + return err + } + } + } + if inStringSlice(capabilities.GetDropCapabilities(), "ALL") { + for _, c := range getOCICapabilitiesList() { + if err := specgen.DropProcessCapabilityAmbient(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityBounding(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityEffective(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityInheritable(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityPermitted(c); err != nil { + return err + } + } + } + + for _, cap := range capabilities.GetAddCapabilities() { + if strings.ToUpper(cap) == "ALL" { + continue + } + capPrefixed := toCAPPrefixed(cap) + if err := specgen.AddProcessCapabilityAmbient(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityBounding(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityEffective(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityInheritable(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityPermitted(capPrefixed); err != nil { + return err + } + } + + for _, cap := range capabilities.GetDropCapabilities() { + if strings.ToUpper(cap) == "ALL" { + continue + } + capPrefixed := toCAPPrefixed(cap) + if err := specgen.DropProcessCapabilityAmbient(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityBounding(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityEffective(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityInheritable(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityPermitted(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + } + + return nil +} + func hostNetwork(containerConfig *pb.ContainerConfig) bool { securityContext := containerConfig.GetLinux().GetSecurityContext() if securityContext == nil || securityContext.GetNamespaceOptions() == nil { @@ -384,8 +626,23 @@ func ensureSaneLogPath(logPath string) error { return nil } +// addSecretsBindMounts mounts user defined secrets to the container +func addSecretsBindMounts(mountLabel, ctrRunDir string, defaultMounts []string, specgen generate.Generator) ([]rspec.Mount, error) { + containerMounts := specgen.Spec().Mounts + mounts, err := secretMounts(defaultMounts, mountLabel, ctrRunDir, containerMounts) + if err != nil { + return nil, err + } + return mounts, nil +} + // CreateContainer creates a new container in specified PodSandbox func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err 
error) { + const operation = "create_container" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("CreateContainerRequest %+v", req) s.updateLock.RLock() @@ -412,7 +669,11 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig is nil") } - name := containerConfig.GetMetadata().Name + if containerConfig.GetMetadata() == nil { + return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Metadata is nil") + } + + name := containerConfig.GetMetadata().GetName() if name == "" { return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Name is empty") } @@ -465,7 +726,7 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq func (s *Server) setupOCIHooks(specgen *generate.Generator, sb *sandbox.Sandbox, containerConfig *pb.ContainerConfig, command string) error { mounts := containerConfig.GetMounts() addedHooks := map[string]struct{}{} - addHook := func(hook libkpod.HookParams) error { + addHook := func(hook lib.HookParams) error { // Only add a hook once if _, ok := addedHooks[hook.Hook]; !ok { if err := addOCIHook(specgen, hook); err != nil { @@ -549,7 +810,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } } - containerVolumes, err := addOCIBindMounts(mountLabel, containerConfig, &specgen) + containerVolumes, ociMounts, err := addOCIBindMounts(mountLabel, containerConfig, &specgen) if err != nil { return nil, err } @@ -560,8 +821,14 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } specgen.AddAnnotation(annotations.Volumes, string(volumesJSON)) + mnt := rspec.Mount{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"}, + } // Add cgroup mount so container process can introspect its own limits - specgen.AddCgroupsMount("ro") + specgen.AddMount(mnt) if err := addDevices(sb, containerConfig, &specgen); err != nil { return nil, err @@ -569,6 +836,10 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, labels := containerConfig.GetLabels() + if err := validateLabels(labels); err != nil { + return nil, err + } + metadata := containerConfig.GetMetadata() kubeAnnotations := containerConfig.GetAnnotations() @@ -585,7 +856,8 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, // set this container's apparmor profile if it is set by sandbox if s.appArmorEnabled && !privileged { - appArmorProfileName := s.getAppArmorProfileName(sb.Annotations(), metadata.Name) + + appArmorProfileName := s.getAppArmorProfileName(containerConfig.GetLinux().GetSecurityContext().GetApparmorProfile()) if appArmorProfileName != "" { // reload default apparmor profile if it is unloaded. 
if s.appArmorProfile == apparmor.DefaultApparmorProfile { @@ -596,6 +868,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, specgen.SetProcessApparmorProfile(appArmorProfileName) } + } logPath := containerConfig.LogPath @@ -629,28 +902,13 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, if linux != nil { resources := linux.GetResources() if resources != nil { - cpuPeriod := resources.CpuPeriod - if cpuPeriod != 0 { - specgen.SetLinuxResourcesCPUPeriod(uint64(cpuPeriod)) - } - - cpuQuota := resources.CpuQuota - if cpuQuota != 0 { - specgen.SetLinuxResourcesCPUQuota(cpuQuota) - } - - cpuShares := resources.CpuShares - if cpuShares != 0 { - specgen.SetLinuxResourcesCPUShares(uint64(cpuShares)) - } - - memoryLimit := resources.MemoryLimitInBytes - if memoryLimit != 0 { - specgen.SetLinuxResourcesMemoryLimit(memoryLimit) - } - - oomScoreAdj := resources.OomScoreAdj - specgen.SetProcessOOMScoreAdj(int(oomScoreAdj)) + specgen.SetLinuxResourcesCPUPeriod(uint64(resources.GetCpuPeriod())) + specgen.SetLinuxResourcesCPUQuota(resources.GetCpuQuota()) + specgen.SetLinuxResourcesCPUShares(uint64(resources.GetCpuShares())) + specgen.SetLinuxResourcesMemoryLimit(resources.GetMemoryLimitInBytes()) + specgen.SetProcessOOMScoreAdj(int(resources.GetOomScoreAdj())) + specgen.SetLinuxResourcesCPUCpus(resources.GetCpusetCpus()) + specgen.SetLinuxResourcesCPUMems(resources.GetCpusetMems()) } var cgPath string @@ -669,61 +927,18 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } specgen.SetLinuxCgroupsPath(cgPath) - capabilities := linux.GetSecurityContext().GetCapabilities() if privileged { - // this is setting correct capabilities as well for privileged mode specgen.SetupPrivileged(true) setOCIBindMountsPrivileged(&specgen) } else { - toCAPPrefixed := func(cap string) string { - if !strings.HasPrefix(strings.ToLower(cap), "cap_") { - return "CAP_" + strings.ToUpper(cap) - } - return cap - } - - // Add/drop all capabilities if "all" is specified, so that - // following individual add/drop could still work. E.g. - // AddCapabilities: []string{"ALL"}, DropCapabilities: []string{"CHOWN"} - // will be all capabilities without `CAP_CHOWN`. 
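The resources refactor just above works because protobuf getters are nil-safe: `resources.GetCpuPeriod()` returns the zero value instead of panicking, so the per-field `!= 0` guards can be dropped and every setter called unconditionally. A tiny illustration of the getter pattern (the `Resources` type is a pared-down stand-in for the CRI message):

```go
package main

import "fmt"

// Resources stands in for the CRI LinuxContainerResources message; the
// pointer receiver plus nil check mimics protobuf's generated getters.
type Resources struct {
	CpuPeriod int64
	CpuShares int64
}

func (r *Resources) GetCpuPeriod() int64 {
	if r == nil {
		return 0
	}
	return r.CpuPeriod
}

func (r *Resources) GetCpuShares() int64 {
	if r == nil {
		return 0
	}
	return r.CpuShares
}

func main() {
	var r *Resources // nil, as when the CRI request omits resources
	// Getter-style access never panics, so callers can set every field
	// unconditionally instead of guarding each one.
	fmt.Println(r.GetCpuPeriod(), r.GetCpuShares()) // 0 0
}
```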
- // see https://github.com/kubernetes/kubernetes/issues/51980 - if inStringSlice(capabilities.GetAddCapabilities(), "ALL") { - for _, c := range getOCICapabilitiesList() { - if err := specgen.AddProcessCapability(c); err != nil { - return nil, err - } - } - } - if inStringSlice(capabilities.GetDropCapabilities(), "ALL") { - for _, c := range getOCICapabilitiesList() { - if err := specgen.DropProcessCapability(c); err != nil { - return nil, err - } - } - } - - if capabilities != nil { - for _, cap := range capabilities.GetAddCapabilities() { - if strings.ToUpper(cap) == "ALL" { - continue - } - if err := specgen.AddProcessCapability(toCAPPrefixed(cap)); err != nil { - return nil, err - } - } - - for _, cap := range capabilities.GetDropCapabilities() { - if strings.ToUpper(cap) == "ALL" { - continue - } - if err := specgen.DropProcessCapability(toCAPPrefixed(cap)); err != nil { - return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) - } - } + err = setupCapabilities(&specgen, linux.GetSecurityContext().GetCapabilities()) + if err != nil { + return nil, err } } specgen.SetProcessSelinuxLabel(processLabel) specgen.SetLinuxMountLabel(mountLabel) + specgen.SetProcessNoNewPrivileges(linux.GetSecurityContext().GetNoNewPrivs()) if containerConfig.GetLinux().GetSecurityContext() != nil && !containerConfig.GetLinux().GetSecurityContext().Privileged { @@ -733,6 +948,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, "/proc/timer_list", "/proc/timer_stats", "/proc/sched_debug", + "/proc/scsi", "/sys/firmware", } { specgen.AddLinuxMaskedPaths(mp) @@ -756,10 +972,26 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, logrus.Debugf("pod container state %+v", podInfraState) ipcNsPath := fmt.Sprintf("/proc/%d/ns/ipc", podInfraState.Pid) - if err := specgen.AddOrReplaceLinuxNamespace("ipc", ipcNsPath); err != nil { + if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.IPCNamespace), ipcNsPath); err != nil { return nil, err } + utsNsPath := fmt.Sprintf("/proc/%d/ns/uts", podInfraState.Pid) + if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.UTSNamespace), utsNsPath); err != nil { + return nil, err + } + + if containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostPid() { + // kubernetes PodSpec specify to use Host PID namespace + specgen.RemoveLinuxNamespace(string(rspec.PIDNamespace)) + } else if s.config.EnableSharedPIDNamespace { + // share Pod PID namespace + pidNsPath := fmt.Sprintf("/proc/%d/ns/pid", podInfraState.Pid) + if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.PIDNamespace), pidNsPath); err != nil { + return nil, err + } + } + netNsPath := sb.NetNsPath() if netNsPath == "" { // The sandbox does not have a permanent namespace, @@ -767,7 +999,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, netNsPath = fmt.Sprintf("/proc/%d/ns/net", podInfraState.Pid) } - if err := specgen.AddOrReplaceLinuxNamespace("network", netNsPath); err != nil { + if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.NetworkNamespace), netNsPath); err != nil { return nil, err } @@ -782,53 +1014,37 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } images, err := s.StorageImageServer().ResolveNames(image) if err != nil { - // This means we got an image ID - if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") { + if err == storage.ErrCannotParseImageID { images = append(images, image) } else { 
return nil, err
 		}
 	}
-	image = images[0]
-	// Get imageName and imageRef that are requested in container status
-	imageName := image
-	status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), image)
+	// Get imageName and imageRef that are later requested in container status
+	status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), images[0])
 	if err != nil {
 		return nil, err
 	}
-
+	imageName := status.Name
 	imageRef := status.ID
-	//
-	// TODO: https://github.com/kubernetes-incubator/cri-o/issues/531
-	//
-	//for _, n := range status.Names {
-	//r, err := reference.ParseNormalizedNamed(n)
-	//if err != nil {
-	//return nil, fmt.Errorf("failed to normalize image name for ImageRef: %v", err)
-	//}
-	//if digested, isDigested := r.(reference.Canonical); isDigested {
-	//imageRef = reference.FamiliarString(digested)
-	//break
-	//}
-	//}
-	for _, n := range status.Names {
-		r, err := reference.ParseNormalizedNamed(n)
-		if err != nil {
-			return nil, fmt.Errorf("failed to normalize image name for Image: %v", err)
-		}
-		if tagged, isTagged := r.(reference.Tagged); isTagged {
-			imageName = reference.FamiliarString(tagged)
-			break
-		}
+	if len(status.RepoDigests) > 0 {
+		imageRef = status.RepoDigests[0]
 	}

+	specgen.AddAnnotation(annotations.Image, image)
 	specgen.AddAnnotation(annotations.ImageName, imageName)
 	specgen.AddAnnotation(annotations.ImageRef, imageRef)
 	specgen.AddAnnotation(annotations.IP, sb.IP())

+	mnt = rspec.Mount{
+		Type:        "bind",
+		Source:      sb.ShmPath(),
+		Destination: "/dev/shm",
+		Options:     []string{"rw", "bind"},
+	}
 	// bind mount the pod shm
-	specgen.AddBindMount(sb.ShmPath(), "/dev/shm", []string{"rw"})
+	specgen.AddMount(mnt)

 	options := []string{"rw"}
 	if readOnlyRootfs {
@@ -839,8 +1055,14 @@
 			return nil, err
 		}

+		mnt = rspec.Mount{
+			Type:        "bind",
+			Source:      sb.ResolvPath(),
+			Destination: "/etc/resolv.conf",
+			Options:     append(options, "bind"),
+		}
 		// bind mount the pod resolver file
-		specgen.AddBindMount(sb.ResolvPath(), "/etc/resolv.conf", options)
+		specgen.AddMount(mnt)
 	}

 	if sb.HostnamePath() != "" {
@@ -848,15 +1070,29 @@
 			return nil, err
 		}

-		specgen.AddBindMount(sb.HostnamePath(), "/etc/hostname", options)
+		mnt = rspec.Mount{
+			Type:        "bind",
+			Source:      sb.HostnamePath(),
+			Destination: "/etc/hostname",
+			Options:     append(options, "bind"),
+		}
+		specgen.AddMount(mnt)
 	}

 	// Bind mount /etc/hosts for host networking containers
 	if hostNetwork(containerConfig) {
-		specgen.AddBindMount("/etc/hosts", "/etc/hosts", options)
+		mnt = rspec.Mount{
+			Type:        "bind",
+			Source:      "/etc/hosts",
+			Destination: "/etc/hosts",
+			Options:     append(options, "bind"),
+		}
+		specgen.AddMount(mnt)
 	}

+	// Set hostname and add env for hostname
 	specgen.SetHostname(sb.Hostname())
+	specgen.AddProcessEnv("HOSTNAME", sb.Hostname())

 	specgen.AddAnnotation(annotations.Name, containerName)
 	specgen.AddAnnotation(annotations.ContainerID, containerID)
@@ -867,7 +1103,6 @@
 	specgen.AddAnnotation(annotations.TTY, fmt.Sprintf("%v", containerConfig.Tty))
 	specgen.AddAnnotation(annotations.Stdin, fmt.Sprintf("%v", containerConfig.Stdin))
 	specgen.AddAnnotation(annotations.StdinOnce, fmt.Sprintf("%v", containerConfig.StdinOnce))
-	specgen.AddAnnotation(annotations.Image, image)

 	specgen.AddAnnotation(annotations.ResolvPath,
sb.InfraContainer().CrioAnnotations()[annotations.ResolvPath]) created := time.Now() @@ -891,17 +1126,19 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } specgen.AddAnnotation(annotations.Annotations, string(kubeAnnotationsJSON)) - metaname := metadata.Name + spp := containerConfig.GetLinux().GetSecurityContext().GetSeccompProfilePath() if !privileged { - if err = s.setupSeccomp(&specgen, metaname, sb.Annotations()); err != nil { + if err = s.setupSeccomp(&specgen, spp); err != nil { return nil, err } } + specgen.AddAnnotation(annotations.SeccompProfilePath, spp) + metaname := metadata.Name attempt := metadata.Attempt containerInfo, err := s.StorageRuntimeServer().CreateContainer(s.ImageContext(), sb.Name(), sb.ID(), - image, image, + image, status.ID, containerName, containerID, metaname, attempt, @@ -910,6 +1147,14 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, if err != nil { return nil, err } + defer func() { + if err != nil { + err2 := s.StorageRuntimeServer().DeleteContainer(containerInfo.ID) + if err2 != nil { + logrus.Warnf("Failed to cleanup container directory: %v", err2) + } + } + }() mountPoint, err := s.StorageRuntimeServer().StartContainer(containerID) if err != nil { @@ -919,7 +1164,8 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, containerImageConfig := containerInfo.Config if containerImageConfig == nil { - return nil, fmt.Errorf("empty image config for %s", image) + err = fmt.Errorf("empty image config for %s", image) + return nil, err } if containerImageConfig.Config.StopSignal != "" { @@ -928,7 +1174,8 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } // Add image volumes - if err := addImageVolumes(mountPoint, s, &containerInfo, &specgen, mountLabel); err != nil { + volumeMounts, err := addImageVolumes(mountPoint, s, &containerInfo, &specgen, mountLabel) + if err != nil { return nil, err } @@ -938,30 +1185,10 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } specgen.SetProcessArgs(processArgs) - // Add environment variables from CRI and image config - envs := containerConfig.GetEnvs() - if envs != nil { - for _, item := range envs { - key := item.Key - value := item.Value - if key == "" { - continue - } - specgen.AddProcessEnv(key, value) - } - } - if containerImageConfig != nil { - for _, item := range containerImageConfig.Config.Env { - parts := strings.SplitN(item, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid env from image: %s", item) - } - - if parts[0] == "" { - continue - } - specgen.AddProcessEnv(parts[0], parts[1]) - } + envs := mergeEnvs(containerImageConfig, containerConfig.GetEnvs()) + for _, e := range envs { + parts := strings.SplitN(e, "=", 2) + specgen.AddProcessEnv(parts[0], parts[1]) } // Set working directory @@ -978,6 +1205,38 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, containerCwd = runtimeCwd } specgen.SetProcessCwd(containerCwd) + if err := setupWorkingDirectory(mountPoint, mountLabel, containerCwd); err != nil { + if err1 := s.StorageRuntimeServer().StopContainer(containerID); err1 != nil { + return nil, fmt.Errorf("can't umount container after cwd error %v: %v", err, err1) + } + return nil, err + } + + var secretMounts []rspec.Mount + if len(s.config.DefaultMounts) > 0 { + var err error + secretMounts, err = addSecretsBindMounts(mountLabel, containerInfo.RunDir, s.config.DefaultMounts, specgen) + if err 
!= nil { + return nil, fmt.Errorf("failed to mount secrets: %v", err) + } + } + + mounts := []rspec.Mount{} + mounts = append(mounts, ociMounts...) + mounts = append(mounts, volumeMounts...) + mounts = append(mounts, secretMounts...) + + sort.Sort(orderedMounts(mounts)) + + for _, m := range mounts { + mnt = rspec.Mount{ + Type: "bind", + Source: m.Source, + Destination: m.Destination, + Options: append(m.Options, "bind"), + } + specgen.AddMount(mnt) + } if err := s.setupOCIHooks(&specgen, sb, containerConfig, processArgs[0]); err != nil { return nil, err @@ -1013,7 +1272,9 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, if err != nil { return nil, err } + container.SetSpec(specgen.Spec()) container.SetMountPoint(mountPoint) + container.SetSeccompProfilePath(spp) for _, cv := range containerVolumes { container.AddVolume(cv) @@ -1022,14 +1283,11 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, return container, nil } -func (s *Server) setupSeccomp(specgen *generate.Generator, cname string, sbAnnotations map[string]string) error { - profile, ok := sbAnnotations["container.seccomp.security.alpha.kubernetes.io/"+cname] - if !ok { - profile, ok = sbAnnotations["seccomp.security.alpha.kubernetes.io/pod"] - if !ok { - // running w/o seccomp, aka unconfined - profile = seccompUnconfined - } +func (s *Server) setupSeccomp(specgen *generate.Generator, profile string) error { + if profile == "" { + // running w/o seccomp, aka unconfined + specgen.Spec().Linux.Seccomp = nil + return nil } if !s.seccompEnabled { if profile != seccompUnconfined { @@ -1048,14 +1306,16 @@ func (s *Server) setupSeccomp(specgen *generate.Generator, cname string, sbAnnot if !strings.HasPrefix(profile, seccompLocalhostPrefix) { return fmt.Errorf("unknown seccomp profile option: %q", profile) } - // FIXME: https://github.com/kubernetes/kubernetes/issues/39128 - return nil + fname := strings.TrimPrefix(profile, "localhost/") + file, err := ioutil.ReadFile(filepath.FromSlash(fname)) + if err != nil { + return fmt.Errorf("cannot load seccomp profile %q: %v", fname, err) + } + return seccomp.LoadProfileFromBytes(file, specgen) } // getAppArmorProfileName gets the profile name for the given container. 
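`setupSeccomp` now takes the profile string straight from the CRI `SeccompProfilePath` field, and a `localhost/<path>` profile is finally read from disk rather than skipped. A rough sketch of just the path handling, under the assumption that runtime-default handling happens before this point; in the real code the resulting bytes go to `seccomp.LoadProfileFromBytes`:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strings"
)

const (
	seccompUnconfined      = "unconfined"
	seccompLocalhostPrefix = "localhost/"
)

// loadSeccompBytes resolves a CRI seccomp profile string to raw profile
// JSON; empty and unconfined profiles return nil (seccomp disabled).
func loadSeccompBytes(profile string) ([]byte, error) {
	switch {
	case profile == "" || profile == seccompUnconfined:
		return nil, nil
	case strings.HasPrefix(profile, seccompLocalhostPrefix):
		fname := strings.TrimPrefix(profile, seccompLocalhostPrefix)
		b, err := ioutil.ReadFile(filepath.FromSlash(fname))
		if err != nil {
			return nil, fmt.Errorf("cannot load seccomp profile %q: %v", fname, err)
		}
		return b, nil
	default:
		return nil, fmt.Errorf("unknown seccomp profile option: %q", profile)
	}
}

func main() {
	// Succeeds only if the file exists; otherwise prints a load error.
	_, err := loadSeccompBytes("localhost//etc/crio/seccomp.json")
	fmt.Println(err)
}
```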
-func (s *Server) getAppArmorProfileName(annotations map[string]string, ctrName string) string { - profile := apparmor.GetProfileNameFromPodAnnotations(annotations, ctrName) - +func (s *Server) getAppArmorProfileName(profile string) string { if profile == "" { return "" } @@ -1135,3 +1395,19 @@ func clearReadOnly(m *rspec.Mount) { } m.Options = opt } + +func setupWorkingDirectory(rootfs, mountLabel, containerCwd string) error { + fp, err := symlink.FollowSymlinkInScope(filepath.Join(rootfs, containerCwd), rootfs) + if err != nil { + return err + } + if err := os.MkdirAll(fp, 0755); err != nil { + return err + } + if mountLabel != "" { + if err1 := label.Relabel(fp, mountLabel, true); err1 != nil && err1 != unix.ENOTSUP { + return fmt.Errorf("relabel failed %s: %v", fp, err1) + } + } + return nil +} diff --git a/server/container_exec.go b/server/container_exec.go index 0cdb9579..3bb37749 100644 --- a/server/container_exec.go +++ b/server/container_exec.go @@ -5,6 +5,7 @@ import ( "io" "os" "os/exec" + "time" "github.com/docker/docker/pkg/pools" "github.com/kubernetes-incubator/cri-o/oci" @@ -13,15 +14,21 @@ import ( "k8s.io/client-go/tools/remotecommand" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - utilexec "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/term" + utilexec "k8s.io/utils/exec" ) // Exec prepares a streaming endpoint to execute a command in the container. -func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecResponse, error) { +func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (resp *pb.ExecResponse, err error) { + const operation = "exec" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("ExecRequest %+v", req) - resp, err := s.GetExec(req) + resp, err = s.GetExec(req) if err != nil { return nil, fmt.Errorf("unable to prepare exec endpoint") } @@ -46,12 +53,15 @@ func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader, return fmt.Errorf("container is not created or running") } - args := []string{"exec"} - if tty { - args = append(args, "-t") + processFile, err := oci.PrepareProcessExec(c, cmd, tty) + if err != nil { + return err } + defer os.RemoveAll(processFile.Name()) + + args := []string{"exec"} + args = append(args, "--process", processFile.Name()) args = append(args, c.ID()) - args = append(args, cmd...) execCmd := exec.Command(ss.runtimeServer.Runtime().Path(c), args...) var cmdErr error if tty { diff --git a/server/container_execsync.go b/server/container_execsync.go index 35f7896c..4d7b6718 100644 --- a/server/container_execsync.go +++ b/server/container_execsync.go @@ -2,6 +2,7 @@ package server import ( "fmt" + "time" "github.com/kubernetes-incubator/cri-o/oci" "github.com/sirupsen/logrus" @@ -10,7 +11,12 @@ import ( ) // ExecSync runs a command in a container synchronously. 
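Every RPC handler in this diff gains the same instrumentation prologue, which is why the signatures change from `(*pb.XResponse, error)` to named results `(resp *pb.XResponse, err error)`: the deferred closure reads `err` at return time, so whichever error is ultimately returned is the one recorded. A generic sketch of the idiom with the metrics helpers stubbed out:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// Stubs for the unexported recordOperation/recordError helpers used above.
func recordOperation(op string, t time.Time) { fmt.Println("op:", op, "at", t.Format(time.RFC3339)) }
func recordError(op string, err error) {
	if err != nil {
		fmt.Println("err:", op, err)
	}
}

// doWork uses a named error result so the deferred closure observes the
// final value of err, whichever return path is taken.
func doWork(fail bool) (result string, err error) {
	const operation = "do_work"
	defer func() {
		recordOperation(operation, time.Now())
		recordError(operation, err)
	}()
	if fail {
		return "", errors.New("boom")
	}
	return "ok", nil
}

func main() {
	doWork(false) // records the operation only
	doWork(true)  // records the operation and the error
}
```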
-func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.ExecSyncResponse, error) { +func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (resp *pb.ExecSyncResponse, err error) { + const operation = "exec_sync" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("ExecSyncRequest %+v", req) c, err := s.GetContainerFromRequest(req.ContainerId) if err != nil { @@ -35,7 +41,7 @@ func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.Exe if err != nil { return nil, err } - resp := &pb.ExecSyncResponse{ + resp = &pb.ExecSyncResponse{ Stdout: execResp.Stdout, Stderr: execResp.Stderr, ExitCode: execResp.ExitCode, diff --git a/server/container_list.go b/server/container_list.go index 995b7e1b..060fa2af 100644 --- a/server/container_list.go +++ b/server/container_list.go @@ -1,6 +1,8 @@ package server import ( + "time" + "github.com/kubernetes-incubator/cri-o/oci" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -27,41 +29,53 @@ func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool { } // ListContainers lists all containers by filters. -func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) { +func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (resp *pb.ListContainersResponse, err error) { + const operation = "list_containers" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("ListContainersRequest %+v", req) + var ctrs []*pb.Container - filter := req.Filter + filter := req.GetFilter() ctrList, err := s.ContainerServer.ListContainers() if err != nil { return nil, err } - // Filter using container id and pod id first. - if filter.Id != "" { - id, err := s.CtrIDIndex().Get(filter.Id) - if err != nil { - return nil, err - } - c := s.ContainerServer.GetContainer(id) - if c != nil { - if filter.PodSandboxId != "" { - if c.Sandbox() == filter.PodSandboxId { - ctrList = []*oci.Container{c} - } else { - ctrList = []*oci.Container{} - } + if filter != nil { - } else { - ctrList = []*oci.Container{c} + // Filter using container id and pod id first. + if filter.Id != "" { + id, err := s.CtrIDIndex().Get(filter.Id) + if err != nil { + // If we don't find a container ID with a filter, it should not + // be considered an error. 
Log a warning and return an empty struct
+				logrus.Warnf("unable to find container ID %s", filter.Id)
+				return &pb.ListContainersResponse{}, nil
 			}
-		}
-	} else {
-		if filter.PodSandboxId != "" {
-			pod := s.ContainerServer.GetSandbox(filter.PodSandboxId)
-			if pod == nil {
-				ctrList = []*oci.Container{}
-			} else {
-				ctrList = pod.Containers().List()
+			c := s.ContainerServer.GetContainer(id)
+			if c != nil {
+				if filter.PodSandboxId != "" {
+					if c.Sandbox() == filter.PodSandboxId {
+						ctrList = []*oci.Container{c}
+					} else {
+						ctrList = []*oci.Container{}
+					}
+
+				} else {
+					ctrList = []*oci.Container{c}
+				}
+			}
+		} else {
+			if filter.PodSandboxId != "" {
+				pod := s.ContainerServer.GetSandbox(filter.PodSandboxId)
+				if pod == nil {
+					ctrList = []*oci.Container{}
+				} else {
+					ctrList = pod.Containers().List()
+				}
 			}
 		}
 	}
@@ -83,6 +97,7 @@ func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersReque
 			Metadata:    ctr.Metadata(),
 			Annotations: ctr.Annotations(),
 			Image:       img,
+			ImageRef:    ctr.ImageRef(),
 		}

 		switch cState.Status {
@@ -101,7 +116,7 @@ func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersReque
 		}
 	}

-	resp := &pb.ListContainersResponse{
+	resp = &pb.ListContainersResponse{
 		Containers: ctrs,
 	}
 	logrus.Debugf("ListContainersResponse: %+v", resp)
diff --git a/server/container_portforward.go b/server/container_portforward.go
index 97dd5342..38d33bea 100644
--- a/server/container_portforward.go
+++ b/server/container_portforward.go
@@ -6,6 +6,7 @@ import (
 	"io"
 	"os/exec"
 	"strings"
+	"time"

 	"github.com/docker/docker/pkg/pools"
 	"github.com/kubernetes-incubator/cri-o/oci"
@@ -15,11 +16,15 @@ import (
 )

 // PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
-func (s *Server) PortForward(ctx context.Context, req *pb.PortForwardRequest) (*pb.PortForwardResponse, error) {
+func (s *Server) PortForward(ctx context.Context, req *pb.PortForwardRequest) (resp *pb.PortForwardResponse, err error) {
+	const operation = "port_forward"
+	defer func() {
+		recordOperation(operation, time.Now())
+		recordError(operation, err)
+	}()
 	logrus.Debugf("PortForwardRequest %+v", req)

-	resp, err := s.GetPortForward(req)
-
+	resp, err = s.GetPortForward(req)
 	if err != nil {
 		return nil, fmt.Errorf("unable to prepare portforward endpoint")
 	}
diff --git a/server/container_remove.go b/server/container_remove.go
index cedfc602..d29e9fb2 100644
--- a/server/container_remove.go
+++ b/server/container_remove.go
@@ -1,6 +1,8 @@
 package server

 import (
+	"time"
+
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )

 // RemoveContainer removes the container. If the container is running, the container
 // should be force removed.
-func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) { - _, err := s.ContainerServer.Remove(req.ContainerId, true) +func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (resp *pb.RemoveContainerResponse, err error) { + const operation = "remove_container" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("RemoveContainerRequest: %+v", req) + + _, err = s.ContainerServer.Remove(ctx, req.ContainerId, true) if err != nil { return nil, err } - resp := &pb.RemoveContainerResponse{} + resp = &pb.RemoveContainerResponse{} logrus.Debugf("RemoveContainerResponse: %+v", resp) return resp, nil } diff --git a/server/container_start.go b/server/container_start.go index 85be0948..b4dd222f 100644 --- a/server/container_start.go +++ b/server/container_start.go @@ -2,6 +2,7 @@ package server import ( "fmt" + "time" "github.com/kubernetes-incubator/cri-o/oci" "github.com/sirupsen/logrus" @@ -10,7 +11,12 @@ import ( ) // StartContainer starts the container. -func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) { +func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (resp *pb.StartContainerResponse, err error) { + const operation = "start_container" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("StartContainerRequest %+v", req) c, err := s.GetContainerFromRequest(req.ContainerId) if err != nil { @@ -37,7 +43,7 @@ func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerReque return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err) } - resp := &pb.StartContainerResponse{} + resp = &pb.StartContainerResponse{} logrus.Debugf("StartContainerResponse %+v", resp) return resp, nil } diff --git a/server/container_stats.go b/server/container_stats.go index 22b87c45..17df31ad 100644 --- a/server/container_stats.go +++ b/server/container_stats.go @@ -2,6 +2,7 @@ package server import ( "fmt" + "time" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" @@ -9,6 +10,11 @@ import ( // ContainerStats returns stats of the container. If the container does not // exist, the call returns an error. -func (s *Server) ContainerStats(ctx context.Context, req *pb.ContainerStatsRequest) (*pb.ContainerStatsResponse, error) { +func (s *Server) ContainerStats(ctx context.Context, req *pb.ContainerStatsRequest) (resp *pb.ContainerStatsResponse, err error) { + const operation = "container_stats" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() return nil, fmt.Errorf("not implemented") } diff --git a/server/container_stats_list.go b/server/container_stats_list.go index 92922099..2c564714 100644 --- a/server/container_stats_list.go +++ b/server/container_stats_list.go @@ -2,12 +2,18 @@ package server import ( "fmt" + "time" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // ListContainerStats returns stats of all running containers. 
-func (s *Server) ListContainerStats(ctx context.Context, req *pb.ListContainerStatsRequest) (*pb.ListContainerStatsResponse, error) { +func (s *Server) ListContainerStats(ctx context.Context, req *pb.ListContainerStatsRequest) (resp *pb.ListContainerStatsResponse, err error) { + const operation = "list_container_stats" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() return nil, fmt.Errorf("not implemented") } diff --git a/server/container_status.go b/server/container_status.go index b4684c9c..3b84468f 100644 --- a/server/container_status.go +++ b/server/container_status.go @@ -1,6 +1,9 @@ package server import ( + "time" + + "github.com/containers/image/types" "github.com/kubernetes-incubator/cri-o/oci" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -14,7 +17,12 @@ const ( ) // ContainerStatus returns status of the container. -func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) { +func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (resp *pb.ContainerStatusResponse, err error) { + const operation = "container_status" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() logrus.Debugf("ContainerStatusRequest %+v", req) c, err := s.GetContainerFromRequest(req.ContainerId) if err != nil { @@ -22,7 +30,7 @@ func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusReq } containerID := c.ID() - resp := &pb.ContainerStatusResponse{ + resp = &pb.ContainerStatusResponse{ Status: &pb.ContainerStatus{ Id: containerID, Metadata: c.Metadata(), @@ -31,7 +39,10 @@ func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusReq ImageRef: c.ImageRef(), }, } - resp.Status.Image = &pb.ImageSpec{Image: c.ImageName()} + resp.Status.Image = &pb.ImageSpec{Image: c.Image()} + if status, err := s.StorageImageServer().ImageStatus(&types.SystemContext{}, c.ImageRef()); err == nil { + resp.Status.Image.Image = status.Name + } mounts := []*pb.Mount{} for _, cv := range c.Volumes() { diff --git a/server/container_stop.go b/server/container_stop.go index c0093cfd..6846f90d 100644 --- a/server/container_stop.go +++ b/server/container_stop.go @@ -1,19 +1,28 @@ package server import ( + "time" + "github.com/sirupsen/logrus" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // StopContainer stops a running container with a grace period (i.e., timeout). 
-func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) { - _, err := s.ContainerServer.ContainerStop(req.ContainerId, req.Timeout) +func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (resp *pb.StopContainerResponse, err error) { + const operation = "stop_container" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("StopContainerRequest %+v", req) + + _, err = s.ContainerServer.ContainerStop(ctx, req.ContainerId, req.Timeout) if err != nil { return nil, err } - resp := &pb.StopContainerResponse{} + resp = &pb.StopContainerResponse{} logrus.Debugf("StopContainerResponse %s: %+v", req.ContainerId, resp) return resp, nil } diff --git a/server/container_update_resources.go b/server/container_update_resources.go new file mode 100644 index 00000000..c58f3484 --- /dev/null +++ b/server/container_update_resources.go @@ -0,0 +1,55 @@ +package server + +import ( + "fmt" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/kubernetes-incubator/cri-o/oci" + rspec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" + "golang.org/x/net/context" + pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" +) + +// UpdateContainerResources updates ContainerConfig of the container. +func (s *Server) UpdateContainerResources(ctx context.Context, req *pb.UpdateContainerResourcesRequest) (resp *pb.UpdateContainerResourcesResponse, err error) { + const operation = "update_container_resources" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("UpdateContainerResources %+v", req) + + c, err := s.GetContainerFromRequest(req.GetContainerId()) + if err != nil { + return nil, err + } + state := s.Runtime().ContainerStatus(c) + if !(state.Status == oci.ContainerStateRunning || state.Status == oci.ContainerStateCreated) { + return nil, fmt.Errorf("container %s is not running or created state: %s", c.ID(), state.Status) + } + + resources := toOCIResources(req.GetLinux()) + if err := s.Runtime().UpdateContainer(c, resources); err != nil { + return nil, err + } + return &pb.UpdateContainerResourcesResponse{}, nil +} + +// toOCIResources converts CRI resource constraints to OCI. +func toOCIResources(r *pb.LinuxContainerResources) *rspec.LinuxResources { + return &rspec.LinuxResources{ + CPU: &rspec.LinuxCPU{ + Shares: proto.Uint64(uint64(r.GetCpuShares())), + Quota: proto.Int64(r.GetCpuQuota()), + Period: proto.Uint64(uint64(r.GetCpuPeriod())), + Cpus: r.GetCpusetCpus(), + Mems: r.GetCpusetMems(), + }, + Memory: &rspec.LinuxMemory{ + Limit: proto.Int64(r.GetMemoryLimitInBytes()), + }, + // TODO(runcom): OOMScoreAdj is missing + } +} diff --git a/server/container_updateruntimeconfig.go b/server/container_updateruntimeconfig.go index b900c9b1..b976fc67 100644 --- a/server/container_updateruntimeconfig.go +++ b/server/container_updateruntimeconfig.go @@ -1,11 +1,19 @@ package server import ( + "time" + "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // UpdateRuntimeConfig updates the configuration of a running container. 
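For context on the conversion in toOCIResources above: the CRI request carries plain integers, while the OCI LinuxResources struct wants pointers, hence the proto.Uint64/proto.Int64 wrappers. A small illustrative sketch of what a typical update turns into; the values are invented (512 CPU shares, a 50ms quota in a 100ms period, a 128 MiB memory limit):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/gogo/protobuf/proto"
	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// What toOCIResources would build for a sample UpdateContainerResources
	// request; the numbers here are purely illustrative.
	resources := &rspec.LinuxResources{
		CPU: &rspec.LinuxCPU{
			Shares: proto.Uint64(512),
			Quota:  proto.Int64(50000),   // microseconds of CPU time per period
			Period: proto.Uint64(100000), // period length in microseconds
			Cpus:   "0-1",
			Mems:   "0",
		},
		Memory: &rspec.LinuxMemory{
			Limit: proto.Int64(128 * 1024 * 1024),
		},
	}
	out, _ := json.MarshalIndent(resources, "", "  ")
	fmt.Println(string(out))
}
```

The handler then hands this struct to s.Runtime().UpdateContainer; note the TODO above about OOMScoreAdj not being carried over yet.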
-func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeConfigRequest) (*pb.UpdateRuntimeConfigResponse, error) { +func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeConfigRequest) (resp *pb.UpdateRuntimeConfigResponse, err error) { + const operation = "update_runtime_config" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + return &pb.UpdateRuntimeConfigResponse{}, nil } diff --git a/server/fixtures/crio.conf b/server/fixtures/crio.conf new file mode 100644 index 00000000..d901990a --- /dev/null +++ b/server/fixtures/crio.conf @@ -0,0 +1,41 @@ +[crio] +root = "/var/lib/containers/storage" +runroot = "/var/run/containers/storage" +storage_driver = "overlay" +storage_option = ["overlay.override_kernel_check=1"] + +[crio.api] +listen = "/var/run/crio.sock" +stream_address = "localhost" +stream_port = "10010" + +[crio.runtime] +runtime = "/usr/local/bin/runc" +runtime_untrusted_workload = "untrusted" +default_workload_trust = "trusted" +conmon = "/usr/local/libexec/crio/conmon" +conmon_env = [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", +] +selinux = true +seccomp_profile = "/etc/crio/seccomp.json" +apparmor_profile = "crio-default" +cgroup_manager = "cgroupfs" +pids_limit = 1024 + +[crio.image] +default_transport = "docker://" +pause_image = "kubernetes/pause" +pause_command = "/pause" +signature_policy = "/tmp" +image_volumes = "mkdir" +insecure_registries = [ + "insecure-registry:1234", +] +registries = [ + "registry:4321", +] + +[crio.network] +network_dir = "/etc/cni/net.d/" +plugin_dir = "/opt/cni/bin/" diff --git a/server/fixtures/resolv.conf b/server/fixtures/resolv.conf new file mode 100644 index 00000000..19fa6252 --- /dev/null +++ b/server/fixtures/resolv.conf @@ -0,0 +1,4 @@ +search 192.30.253.113 192.30.252.153 +nameserver cri-o.io +nameserver github.com +options timeout:5 attempts:3 diff --git a/server/fixtures/secret/testDataA b/server/fixtures/secret/testDataA new file mode 100644 index 00000000..ec9068d4 --- /dev/null +++ b/server/fixtures/secret/testDataA @@ -0,0 +1 @@ +secretDataA \ No newline at end of file diff --git a/server/fixtures/secret/testDataB b/server/fixtures/secret/testDataB new file mode 100644 index 00000000..3ff8ea8f --- /dev/null +++ b/server/fixtures/secret/testDataB @@ -0,0 +1 @@ +secretDataB \ No newline at end of file diff --git a/server/image_fs_info.go b/server/image_fs_info.go index 969bdc34..bfa297a7 100644 --- a/server/image_fs_info.go +++ b/server/image_fs_info.go @@ -2,12 +2,19 @@ package server import ( "fmt" + "time" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // ImageFsInfo returns information of the filesystem that is used to store images. 
-func (s *Server) ImageFsInfo(ctx context.Context, req *pb.ImageFsInfoRequest) (*pb.ImageFsInfoResponse, error) { +func (s *Server) ImageFsInfo(ctx context.Context, req *pb.ImageFsInfoRequest) (resp *pb.ImageFsInfoResponse, err error) { + const operation = "image_fs_info" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + return nil, fmt.Errorf("not implemented") } diff --git a/server/image_list.go b/server/image_list.go index ebcc6f6a..bcdc1036 100644 --- a/server/image_list.go +++ b/server/image_list.go @@ -1,13 +1,21 @@ package server import ( + "time" + "github.com/sirupsen/logrus" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // ListImages lists existing images. -func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) { +func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (resp *pb.ListImagesResponse, err error) { + const operation = "list_images" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("ListImagesRequest: %+v", req) filter := "" reqFilter := req.GetFilter() @@ -21,21 +29,23 @@ func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb if err != nil { return nil, err } - response := pb.ListImagesResponse{} + resp = &pb.ListImagesResponse{} for _, result := range results { if result.Size != nil { - response.Images = append(response.Images, &pb.Image{ - Id: result.ID, - RepoTags: result.Names, - Size_: *result.Size, + resp.Images = append(resp.Images, &pb.Image{ + Id: result.ID, + RepoTags: result.RepoTags, + RepoDigests: result.RepoDigests, + Size_: *result.Size, }) } else { - response.Images = append(response.Images, &pb.Image{ - Id: result.ID, - RepoTags: result.Names, + resp.Images = append(resp.Images, &pb.Image{ + Id: result.ID, + RepoTags: result.RepoTags, + RepoDigests: result.RepoDigests, }) } } - logrus.Debugf("ListImagesResponse: %+v", response) - return &response, nil + logrus.Debugf("ListImagesResponse: %+v", resp) + return resp, nil } diff --git a/server/image_pull.go b/server/image_pull.go index 26d08912..67dfc469 100644 --- a/server/image_pull.go +++ b/server/image_pull.go @@ -3,16 +3,24 @@ package server import ( "encoding/base64" "strings" + "time" "github.com/containers/image/copy" "github.com/containers/image/types" + "github.com/kubernetes-incubator/cri-o/pkg/storage" "github.com/sirupsen/logrus" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // PullImage pulls a image with authentication config. -func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) { +func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (resp *pb.PullImageResponse, err error) { + const operation = "pull_image" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("PullImageRequest: %+v", req) // TODO: what else do we need here? 
(Signatures when the story isn't just pulling from docker://) image := "" @@ -24,7 +32,6 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P var ( images []string pulled string - err error ) images, err = s.StorageImageServer().ResolveNames(image) if err != nil { @@ -67,11 +74,23 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P } // let's be smart, docker doesn't repull if image already exists. - _, err = s.StorageImageServer().ImageStatus(s.ImageContext(), img) + var storedImage *storage.ImageResult + storedImage, err = s.StorageImageServer().ImageStatus(s.ImageContext(), img) if err == nil { - logrus.Debugf("image %s already in store, skipping pull", img) - pulled = img - break + tmpImg, err := s.StorageImageServer().PrepareImage(s.ImageContext(), img, options) + if err == nil { + tmpImgConfigDigest := tmpImg.ConfigInfo().Digest + if tmpImgConfigDigest.String() == "" { + // this means we are playing with a schema1 image, in which + // case, we're going to repull the image in any case + logrus.Debugf("image config digest is empty, re-pulling image") + } else if tmpImgConfigDigest.String() == storedImage.ConfigDigest.String() { + logrus.Debugf("image %s already in store, skipping pull", img) + pulled = img + break + } + } + logrus.Debugf("image in store has different ID, re-pulling %s", img) } _, err = s.StorageImageServer().PullImage(s.ImageContext(), img, options) @@ -85,8 +104,16 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P if pulled == "" && err != nil { return nil, err } - resp := &pb.PullImageResponse{ - ImageRef: pulled, + status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), pulled) + if err != nil { + return nil, err + } + imageRef := status.ID + if len(status.RepoDigests) > 0 { + imageRef = status.RepoDigests[0] + } + resp = &pb.PullImageResponse{ + ImageRef: imageRef, } logrus.Debugf("PullImageResponse: %+v", resp) return resp, nil diff --git a/server/image_remove.go b/server/image_remove.go index 32ca4066..d1f1e884 100644 --- a/server/image_remove.go +++ b/server/image_remove.go @@ -2,15 +2,22 @@ package server import ( "fmt" - "strings" + "time" + "github.com/kubernetes-incubator/cri-o/pkg/storage" "github.com/sirupsen/logrus" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) // RemoveImage removes the image. 
-func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) { +func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (resp *pb.RemoveImageResponse, err error) { + const operation = "remove_image" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("RemoveImageRequest: %+v", req) image := "" img := req.GetImage() @@ -22,20 +29,18 @@ func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (* } var ( images []string - err error deleted bool ) images, err = s.StorageImageServer().ResolveNames(image) if err != nil { - // This means we got an image ID - if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") { + if err == storage.ErrCannotParseImageID { images = append(images, image) } else { return nil, err } } for _, img := range images { - err = s.StorageImageServer().RemoveImage(s.ImageContext(), img) + err = s.StorageImageServer().UntagImage(s.ImageContext(), img) if err != nil { logrus.Debugf("error deleting image %s: %v", img, err) continue @@ -46,7 +51,7 @@ func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (* if !deleted && err != nil { return nil, err } - resp := &pb.RemoveImageResponse{} + resp = &pb.RemoveImageResponse{} logrus.Debugf("RemoveImageResponse: %+v", resp) return resp, nil } diff --git a/server/image_status.go b/server/image_status.go index 1e362a43..4e2e6a0e 100644 --- a/server/image_status.go +++ b/server/image_status.go @@ -2,9 +2,10 @@ package server import ( "fmt" - "strings" + "time" "github.com/containers/storage" + pkgstorage "github.com/kubernetes-incubator/cri-o/pkg/storage" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -12,7 +13,13 @@ import ( ) // ImageStatus returns the status of the image. 
-func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) { +func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (resp *pb.ImageStatusResponse, err error) { + const operation = "image_status" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + logrus.Debugf("ImageStatusRequest: %+v", req) image := "" img := req.GetImage() @@ -24,8 +31,7 @@ func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (* } images, err := s.StorageImageServer().ResolveNames(image) if err != nil { - // This means we got an image ID - if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") { + if err == pkgstorage.ErrCannotParseImageID { images = append(images, image) } else { return nil, err @@ -40,12 +46,12 @@ func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (* } return nil, err } - resp := &pb.ImageStatusResponse{ + resp = &pb.ImageStatusResponse{ Image: &pb.Image{ - Id: status.ID, - RepoTags: status.Names, - Size_: *status.Size, - // TODO: https://github.com/kubernetes-incubator/cri-o/issues/531 + Id: status.ID, + RepoTags: status.RepoTags, + RepoDigests: status.RepoDigests, + Size_: *status.Size, }, } logrus.Debugf("ImageStatusResponse: %+v", resp) diff --git a/server/inspect.go b/server/inspect.go index 6e3e813c..d1fe6abe 100644 --- a/server/inspect.go +++ b/server/inspect.go @@ -6,8 +6,9 @@ import ( "fmt" "net/http" + cimage "github.com/containers/image/types" "github.com/go-zoo/bone" - "github.com/kubernetes-incubator/cri-o/libkpod/sandbox" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" "github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/types" "github.com/sirupsen/logrus" @@ -45,10 +46,17 @@ func (s *Server) getContainerInfo(id string, getContainerFunc func(id string) *o logrus.Debugf("can't find sandbox %s for container %s", ctr.Sandbox(), id) return types.ContainerInfo{}, errSandboxNotFound } + image := ctr.Image() + if s.ContainerServer != nil && s.ContainerServer.StorageImageServer() != nil { + if status, err := s.ContainerServer.StorageImageServer().ImageStatus(&cimage.SystemContext{}, ctr.ImageRef()); err == nil { + image = status.Name + } + } return types.ContainerInfo{ Name: ctr.Name(), Pid: ctrState.Pid, - Image: ctr.Image(), + Image: image, + ImageRef: ctr.ImageRef(), CreatedTime: ctrState.Created.UnixNano(), Labels: ctr.Labels(), Annotations: ctr.Annotations(), diff --git a/server/inspect_test.go b/server/inspect_test.go index 8c892e43..7246ef86 100644 --- a/server/inspect_test.go +++ b/server/inspect_test.go @@ -7,14 +7,14 @@ import ( "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" "github.com/containernetworking/plugins/pkg/ns" - "github.com/kubernetes-incubator/cri-o/libkpod" - "github.com/kubernetes-incubator/cri-o/libkpod/sandbox" + "github.com/kubernetes-incubator/cri-o/lib" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" "github.com/kubernetes-incubator/cri-o/oci" specs "github.com/opencontainers/runtime-spec/specs-go" ) func TestGetInfo(t *testing.T) { - c := libkpod.DefaultConfig() + c := lib.DefaultConfig() c.RootConfig.Storage = "afoobarstorage" c.RootConfig.Root = "afoobarroot" c.RuntimeConfig.CgroupManager = "systemd" @@ -67,7 +67,7 @@ func TestGetContainerInfo(t *testing.T) { "io.kubernetes.test1": "value1", } getContainerFunc := func(id string) *oci.Container { - container, err := oci.NewContainer("testid", "testname", "", 
"/container/logs", mockNetNS{}, labels, annotations, annotations, "imageName", "imageName", "imageRef", &runtime.ContainerMetadata{}, "testsandboxid", false, false, false, false, false, "/root/for/container", created, "SIGKILL") + container, err := oci.NewContainer("testid", "testname", "", "/container/logs", mockNetNS{}, labels, annotations, annotations, "image", "imageName", "imageRef", &runtime.ContainerMetadata{}, "testsandboxid", false, false, false, false, false, "/root/for/container", created, "SIGKILL") if err != nil { t.Fatal(err) } @@ -96,13 +96,16 @@ func TestGetContainerInfo(t *testing.T) { t.Fatalf("expected same created time %d, got %d", created.UnixNano(), ci.CreatedTime) } if ci.Pid != 42 { - t.Fatalf("expected pid 42, got %s", ci.Pid) + t.Fatalf("expected pid 42, got %v", ci.Pid) } if ci.Name != "testname" { t.Fatalf("expected name testname, got %s", ci.Name) } - if ci.Image != "imageName" { - t.Fatalf("expected image name imageName, got %s", ci.Image) + if ci.Image != "image" { + t.Fatalf("expected image name image, got %s", ci.Image) + } + if ci.ImageRef != "imageRef" { + t.Fatalf("expected image ref imageRef, got %s", ci.ImageRef) } if ci.Root != "/var/foo/container" { t.Fatalf("expected root to be /var/foo/container, got %s", ci.Root) @@ -114,7 +117,7 @@ func TestGetContainerInfo(t *testing.T) { t.Fatalf("expected sandbox to be testsandboxid, got %s", ci.Sandbox) } if ci.IP != "1.1.1.42" { - t.Fatal("expected ip 1.1.1.42, got %s", ci.IP) + t.Fatalf("expected ip 1.1.1.42, got %s", ci.IP) } if len(ci.Annotations) == 0 { t.Fatal("annotations are empty") diff --git a/server/metrics/metrics.go b/server/metrics/metrics.go new file mode 100644 index 00000000..b0527bcc --- /dev/null +++ b/server/metrics/metrics.go @@ -0,0 +1,70 @@ +package metrics + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + // CRIOOperationsKey is the key for CRI-O operation metrics. + CRIOOperationsKey = "crio_operations" + // CRIOOperationsLatencyKey is the key for the operation latency metrics. + CRIOOperationsLatencyKey = "crio_operations_latency_microseconds" + // CRIOOperationsErrorsKey is the key for the operation error metrics. + CRIOOperationsErrorsKey = "crio_operations_errors" + + // TODO(runcom): + // timeouts + + subsystem = "container_runtime" +) + +var ( + // CRIOOperations collects operation counts by operation type. + CRIOOperations = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: subsystem, + Name: CRIOOperationsKey, + Help: "Cumulative number of CRI-O operations by operation type.", + }, + []string{"operation_type"}, + ) + // CRIOOperationsLatency collects operation latency numbers by operation + // type. + CRIOOperationsLatency = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Subsystem: subsystem, + Name: CRIOOperationsLatencyKey, + Help: "Latency in microseconds of CRI-O operations. Broken down by operation type.", + }, + []string{"operation_type"}, + ) + // CRIOOperationsErrors collects operation errors by operation + // type. 
+	CRIOOperationsErrors = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Subsystem: subsystem,
+			Name:      CRIOOperationsErrorsKey,
+			Help:      "Cumulative number of CRI-O operation errors by operation type.",
+		},
+		[]string{"operation_type"},
+	)
+)
+
+var registerMetrics sync.Once
+
+// Register all metrics
+func Register() {
+	registerMetrics.Do(func() {
+		prometheus.MustRegister(CRIOOperations)
+		prometheus.MustRegister(CRIOOperationsLatency)
+		prometheus.MustRegister(CRIOOperationsErrors)
+	})
+}
+
+// SinceInMicroseconds gets the time since the specified start in microseconds.
+func SinceInMicroseconds(start time.Time) float64 {
+	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
+}
diff --git a/server/runtime_status.go b/server/runtime_status.go
index 67fc87b6..5632fab3 100644
--- a/server/runtime_status.go
+++ b/server/runtime_status.go
@@ -1,12 +1,19 @@
 package server

 import (
+	"time"
+
 	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )

 // Status returns the status of the runtime
-func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) {
+func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (resp *pb.StatusResponse, err error) {
+	const operation = "status"
+	defer func() {
+		recordOperation(operation, time.Now())
+		recordError(operation, err)
+	}()

 	// Deal with Runtime conditions
 	runtimeReady, err := s.Runtime().RuntimeReady()
@@ -22,7 +29,7 @@ func (s *Server) Status(ctx context.Context, req *pb.StatusR
 	runtimeReadyConditionString := pb.RuntimeReady
 	networkReadyConditionString := pb.NetworkReady

-	resp := &pb.StatusResponse{
+	resp = &pb.StatusResponse{
 		Status: &pb.RuntimeStatus{
 			Conditions: []*pb.RuntimeCondition{
 				{
diff --git a/server/sandbox_list.go b/server/sandbox_list.go
index e3cac025..4d629b88 100644
--- a/server/sandbox_list.go
+++ b/server/sandbox_list.go
@@ -1,7 +1,9 @@
 package server

 import (
-	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
+	"time"
+
+	"github.com/kubernetes-incubator/cri-o/lib/sandbox"
 	"github.com/kubernetes-incubator/cri-o/oci"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
@@ -28,7 +30,13 @@ func filterSandbox(p *pb.PodSandbox, filter *pb.PodSandboxFilter) bool {
 }

 // ListPodSandbox returns a list of SandBoxes.
-func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
+func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (resp *pb.ListPodSandboxResponse, err error) {
+	const operation = "list_pod_sandbox"
+	defer func() {
+		recordOperation(operation, time.Now())
+		recordError(operation, err)
+	}()
+
 	logrus.Debugf("ListPodSandboxRequest %+v", req)
 	var pods []*pb.PodSandbox
 	var podList []*sandbox.Sandbox
@@ -42,7 +50,11 @@ func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxReque
 		if filter.Id != "" {
 			id, err := s.PodIDIndex().Get(filter.Id)
 			if err != nil {
-				return nil, err
+				// Not finding an ID in a filtered list should not be considered
+				// an error; it might have been deleted when the pod was stopped.
+				// Log and return an empty struct.
+				logrus.Warnf("unable to find pod %s with filter", filter.Id)
+				return &pb.ListPodSandboxResponse{}, nil
 			}
 			sb := s.getSandbox(id)
 			if sb == nil {
@@ -82,7 +94,7 @@ func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxReque
 		}
 	}

-	resp := &pb.ListPodSandboxResponse{
+	resp = &pb.ListPodSandboxResponse{
 		Items: pods,
 	}
 	logrus.Debugf("ListPodSandboxResponse %+v", resp)
diff --git a/server/sandbox_network.go b/server/sandbox_network.go
index 15cf99c8..9b054bdc 100644
--- a/server/sandbox_network.go
+++ b/server/sandbox_network.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"net"

-	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
+	"github.com/kubernetes-incubator/cri-o/lib/sandbox"
 	"github.com/sirupsen/logrus"
 	"k8s.io/kubernetes/pkg/kubelet/network/hostport"
 )
diff --git a/server/sandbox_remove.go b/server/sandbox_remove.go
index 856b8938..62b2c698 100644
--- a/server/sandbox_remove.go
+++ b/server/sandbox_remove.go
@@ -2,9 +2,10 @@ package server

 import (
 	"fmt"
+	"time"

 	"github.com/containers/storage"
-	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
+	"github.com/kubernetes-incubator/cri-o/lib/sandbox"
 	"github.com/kubernetes-incubator/cri-o/oci"
 	pkgstorage "github.com/kubernetes-incubator/cri-o/pkg/storage"
 	"github.com/pkg/errors"
@@ -15,7 +16,13 @@ import (

 // RemovePodSandbox deletes the sandbox. If there are any running containers in the
 // sandbox, they should be force deleted.
-func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
+func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (resp *pb.RemovePodSandboxResponse, err error) {
+	const operation = "remove_pod_sandbox"
+	defer func() {
+		recordOperation(operation, time.Now())
+		recordError(operation, err)
+	}()
+
 	logrus.Debugf("RemovePodSandboxRequest %+v", req)
 	sb, err := s.getPodSandboxFromRequest(req.PodSandboxId)
 	if err != nil {
@@ -27,7 +34,7 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
 		// the CRI interface which expects to not error out in not found
 		// cases.
- resp := &pb.RemovePodSandboxResponse{} + resp = &pb.RemovePodSandboxResponse{} logrus.Warnf("could not get sandbox %s, it's probably been removed already: %v", req.PodSandboxId, err) return resp, nil } @@ -41,7 +48,7 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR if !sb.Stopped() { cState := s.Runtime().ContainerStatus(c) if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning { - if err := s.Runtime().StopContainer(c, -1); err != nil { + if err := s.Runtime().StopContainer(ctx, c, 10); err != nil { // Assume container is already stopped logrus.Warnf("failed to stop container %s: %v", c.Name(), err) } @@ -92,7 +99,7 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR return nil, fmt.Errorf("failed to delete pod sandbox %s from index: %v", sb.ID(), err) } - resp := &pb.RemovePodSandboxResponse{} + resp = &pb.RemovePodSandboxResponse{} logrus.Debugf("RemovePodSandboxResponse %+v", resp) return resp, nil } diff --git a/server/sandbox_run.go b/server/sandbox_run.go index 0bebef84..5ba007c2 100644 --- a/server/sandbox_run.go +++ b/server/sandbox_run.go @@ -13,16 +13,17 @@ import ( "time" "github.com/containers/storage" - "github.com/kubernetes-incubator/cri-o/libkpod/sandbox" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" "github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/pkg/annotations" + runtimespec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" "golang.org/x/sys/unix" - "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/api/core/v1" pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" "k8s.io/kubernetes/pkg/kubelet/leaky" "k8s.io/kubernetes/pkg/kubelet/network/hostport" @@ -82,10 +83,7 @@ func (s *Server) runContainer(container *oci.Container, cgroupParent string) err if err := s.Runtime().CreateContainer(container, cgroupParent); err != nil { return err } - if err := s.Runtime().StartContainer(container); err != nil { - return err - } - return nil + return s.Runtime().StartContainer(container) } var ( @@ -94,19 +92,29 @@ var ( // RunPodSandbox creates and runs a pod-level sandbox. 
func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest) (resp *pb.RunPodSandboxResponse, err error) { + const operation = "run_pod_sandbox" + defer func() { + recordOperation(operation, time.Now()) + recordError(operation, err) + }() + s.updateLock.RLock() defer s.updateLock.RUnlock() + if req.GetConfig().GetMetadata() == nil { + return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Metadata is nil") + } + logrus.Debugf("RunPodSandboxRequest %+v", req) var processLabel, mountLabel, resolvPath string // process req.Name - kubeName := req.GetConfig().GetMetadata().Name + kubeName := req.GetConfig().GetMetadata().GetName() if kubeName == "" { return nil, fmt.Errorf("PodSandboxConfig.Name should not be empty") } - namespace := req.GetConfig().GetMetadata().Namespace - attempt := req.GetConfig().GetMetadata().Attempt + namespace := req.GetConfig().GetMetadata().GetNamespace() + attempt := req.GetConfig().GetMetadata().GetAttempt() id, name, err := s.generatePodIDandName(req.GetConfig()) if err != nil { @@ -152,8 +160,8 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest name, id, s.config.PauseImage, "", containerName, - req.GetConfig().GetMetadata().Name, - req.GetConfig().GetMetadata().Uid, + req.GetConfig().GetMetadata().GetName(), + req.GetConfig().GetMetadata().GetUid(), namespace, attempt, nil) @@ -206,8 +214,13 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest if err := label.Relabel(resolvPath, mountLabel, true); err != nil && err != unix.ENOTSUP { return nil, err } - - g.AddBindMount(resolvPath, "/etc/resolv.conf", []string{"ro"}) + mnt := runtimespec.Mount{ + Type: "bind", + Source: resolvPath, + Destination: "/etc/resolv.conf", + Options: []string{"ro", "bind"}, + } + g.AddMount(mnt) } // add metadata @@ -220,6 +233,10 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest // add labels labels := req.GetConfig().GetLabels() + if err := validateLabels(labels); err != nil { + return nil, err + } + // Add special container name label for the infra container labelsJSON := []byte{} if labels != nil { @@ -254,7 +271,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest securityContext := req.GetConfig().GetLinux().GetSecurityContext() if securityContext == nil { - return nil, fmt.Errorf("no security context found") + logrus.Warn("no security context found in config.") } processLabel, mountLabel, err = getSELinuxLabels(securityContext.GetSelinuxOptions(), privileged) @@ -263,12 +280,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest } // Don't use SELinux separation with Host Pid or IPC Namespace or privileged. - namespaceOptions := securityContext.GetNamespaceOptions() - if namespaceOptions == nil { - return nil, fmt.Errorf("no namespace options found") - } - - if securityContext.GetNamespaceOptions().HostPid || securityContext.GetNamespaceOptions().HostIpc { + if securityContext.GetNamespaceOptions().GetHostPid() || securityContext.GetNamespaceOptions().GetHostIpc() { processLabel, mountLabel = "", "" } g.SetProcessSelinuxLabel(processLabel) @@ -276,7 +288,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest // create shm mount for the pod containers. 
var shmPath string - if namespaceOptions.HostIpc { + if securityContext.GetNamespaceOptions().GetHostIpc() { shmPath = "/dev/shm" } else { shmPath, err = setupShm(podContainer.RunDir, mountLabel) @@ -317,7 +329,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest return nil, err } - hostNetwork := namespaceOptions.HostNetwork + hostNetwork := securityContext.GetNamespaceOptions().GetHostNetwork() hostname, err := getHostname(id, req.GetConfig().Hostname, hostNetwork) if err != nil { @@ -352,7 +364,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest portMappings := convertPortMappings(req.GetConfig().GetPortMappings()) // setup cgroup settings - cgroupParent := req.GetConfig().GetLinux().CgroupParent + cgroupParent := req.GetConfig().GetLinux().GetCgroupParent() if cgroupParent != "" { if s.config.CgroupManager == oci.SystemdCgroupsManager { if len(cgroupParent) <= 6 || !strings.HasSuffix(path.Base(cgroupParent), ".slice") { @@ -405,15 +417,8 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest } // extract linux sysctls from annotations and pass down to oci runtime - safe, unsafe, err := SysctlsFromPodAnnotations(kubeAnnotations) - if err != nil { - return nil, err - } - for _, sysctl := range safe { - g.AddLinuxSysctl(sysctl.Name, sysctl.Value) - } - for _, sysctl := range unsafe { - g.AddLinuxSysctl(sysctl.Name, sysctl.Value) + for key, value := range req.GetConfig().GetLinux().GetSysctls() { + g.AddLinuxSysctl(key, value) } // Set OOM score adjust of the infra container to be very low @@ -424,7 +429,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest // set up namespaces if hostNetwork { - err = g.RemoveLinuxNamespace("network") + err = g.RemoveLinuxNamespace(string(runtimespec.NetworkNamespace)) if err != nil { return nil, err } @@ -445,21 +450,21 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest }() // Pass the created namespace path to the runtime - err = g.AddOrReplaceLinuxNamespace("network", sb.NetNsPath()) + err = g.AddOrReplaceLinuxNamespace(string(runtimespec.NetworkNamespace), sb.NetNsPath()) if err != nil { return nil, err } } - if namespaceOptions.HostPid { - err = g.RemoveLinuxNamespace("pid") + if securityContext.GetNamespaceOptions().GetHostPid() { + err = g.RemoveLinuxNamespace(string(runtimespec.PIDNamespace)) if err != nil { return nil, err } } - if namespaceOptions.HostIpc { - err = g.RemoveLinuxNamespace("ipc") + if securityContext.GetNamespaceOptions().GetHostIpc() { + err = g.RemoveLinuxNamespace(string(runtimespec.IPCNamespace)) if err != nil { return nil, err } @@ -484,7 +489,13 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest if err := label.Relabel(hostnamePath, mountLabel, true); err != nil && err != unix.ENOTSUP { return nil, err } - g.AddBindMount(hostnamePath, "/etc/hostname", []string{"ro"}) + mnt := runtimespec.Mount{ + Type: "bind", + Source: hostnamePath, + Destination: "/etc/hostname", + Options: []string{"ro", "bind"}, + } + g.AddMount(mnt) g.AddAnnotation(annotations.HostnamePath, hostnamePath) sb.AddHostnamePath(hostnamePath) @@ -492,6 +503,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest if err != nil { return nil, err } + container.SetSpec(g.Spec()) container.SetMountPoint(mountPoint) sb.SetInfraContainer(container) @@ -510,6 +522,15 @@ func (s *Server) RunPodSandbox(ctx context.Context, req 
*pb.RunPodSandboxRequest
 	g.AddAnnotation(annotations.IP, ip)
 	sb.AddIP(ip)

+	spp := req.GetConfig().GetLinux().GetSecurityContext().GetSeccompProfilePath()
+	g.AddAnnotation(annotations.SeccompProfilePath, spp)
+	sb.SetSeccompProfilePath(spp)
+	if !privileged {
+		if err = s.setupSeccomp(&g, spp); err != nil {
+			return nil, err
+		}
+	}
+
 	err = g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions)
 	if err != nil {
 		return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.Name(), id, err)
diff --git a/server/sandbox_status.go b/server/sandbox_status.go
index f5b6dd09..90193e71 100644
--- a/server/sandbox_status.go
+++ b/server/sandbox_status.go
@@ -1,6 +1,8 @@
 package server

 import (
+	"time"
+
 	"github.com/kubernetes-incubator/cri-o/oci"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
@@ -8,7 +10,13 @@ import (
 )

 // PodSandboxStatus returns the Status of the PodSandbox.
-func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) {
+func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (resp *pb.PodSandboxStatusResponse, err error) {
+	const operation = "pod_sandbox_status"
+	defer func() {
+		recordOperation(operation, time.Now())
+		recordError(operation, err)
+	}()
+
 	logrus.Debugf("PodSandboxStatusRequest %+v", req)
 	sb, err := s.getPodSandboxFromRequest(req.PodSandboxId)
 	if err != nil {
@@ -24,7 +32,7 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR
 	}

 	sandboxID := sb.ID()
-	resp := &pb.PodSandboxStatusResponse{
+	resp = &pb.PodSandboxStatusResponse{
 		Status: &pb.PodSandboxStatus{
 			Id:        sandboxID,
 			CreatedAt: podInfraContainer.CreatedAt().UnixNano(),
diff --git a/server/sandbox_stop.go b/server/sandbox_stop.go
index 7db436d1..75e97291 100644
--- a/server/sandbox_stop.go
+++ b/server/sandbox_stop.go
@@ -2,11 +2,12 @@ package server

 import (
 	"fmt"
+	"time"

 	"github.com/containers/storage"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/symlink"
-	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
+	"github.com/kubernetes-incubator/cri-o/lib/sandbox"
 	"github.com/kubernetes-incubator/cri-o/oci"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
@@ -18,7 +19,13 @@ import (

 // StopPodSandbox stops the sandbox. If there are any running containers in the
 // sandbox, they should be force terminated.
-func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) {
+func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (resp *pb.StopPodSandboxResponse, err error) {
+	const operation = "stop_pod_sandbox"
+	defer func() {
+		recordOperation(operation, time.Now())
+		recordError(operation, err)
+	}()
+
 	logrus.Debugf("StopPodSandboxRequest %+v", req)
 	sb, err := s.getPodSandboxFromRequest(req.PodSandboxId)
 	if err != nil {
@@ -30,14 +37,14 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque
 		// the CRI interface which expects to not error out in not found
 		// cases.
- resp := &pb.StopPodSandboxResponse{} + resp = &pb.StopPodSandboxResponse{} logrus.Warnf("could not get sandbox %s, it's probably been stopped already: %v", req.PodSandboxId, err) logrus.Debugf("StopPodSandboxResponse %s: %+v", req.PodSandboxId, resp) return resp, nil } if sb.Stopped() { - resp := &pb.StopPodSandboxResponse{} + resp = &pb.StopPodSandboxResponse{} logrus.Debugf("StopPodSandboxResponse %s: %+v", sb.ID(), resp) return resp, nil } @@ -56,7 +63,7 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque for _, c := range containers { cStatus := s.Runtime().ContainerStatus(c) if cStatus.Status != oci.ContainerStateStopped { - if err := s.Runtime().StopContainer(c, -1); err != nil { + if err := s.Runtime().StopContainer(ctx, c, 10); err != nil { return nil, fmt.Errorf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.ID(), err) } if c.ID() == podInfraContainer.ID() { @@ -95,7 +102,7 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque } sb.SetStopped() - resp := &pb.StopPodSandboxResponse{} + resp = &pb.StopPodSandboxResponse{} logrus.Debugf("StopPodSandboxResponse %s: %+v", sb.ID(), resp) return resp, nil } diff --git a/server/secrets.go b/server/secrets.go new file mode 100644 index 00000000..56d3ba81 --- /dev/null +++ b/server/secrets.go @@ -0,0 +1,162 @@ +package server + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + rspec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// SecretData info +type SecretData struct { + Name string + Data []byte +} + +// SaveTo saves secret data to given directory +func (s SecretData) SaveTo(dir string) error { + path := filepath.Join(dir, s.Name) + if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil && !os.IsExist(err) { + return err + } + return ioutil.WriteFile(path, s.Data, 0700) +} + +func readAll(root, prefix string) ([]SecretData, error) { + path := filepath.Join(root, prefix) + + data := []SecretData{} + + files, err := ioutil.ReadDir(path) + if err != nil { + if os.IsNotExist(err) { + return data, nil + } + + return nil, err + } + + for _, f := range files { + fileData, err := readFile(root, filepath.Join(prefix, f.Name())) + if err != nil { + // If the file did not exist, might be a dangling symlink + // Ignore the error + if os.IsNotExist(err) { + continue + } + return nil, err + } + data = append(data, fileData...) 
+	}
+
+	return data, nil
+}
+
+func readFile(root, name string) ([]SecretData, error) {
+	path := filepath.Join(root, name)
+
+	s, err := os.Stat(path)
+	if err != nil {
+		return nil, err
+	}
+
+	if s.IsDir() {
+		dirData, err := readAll(root, name)
+		if err != nil {
+			return nil, err
+		}
+		return dirData, nil
+	}
+	bytes, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	return []SecretData{{Name: name, Data: bytes}}, nil
+}
+
+// getMountsMap separates the host and container directories of a host:container path
+func getMountsMap(path string) (string, string, error) {
+	arr := strings.SplitN(path, ":", 2)
+	if len(arr) == 2 {
+		return arr[0], arr[1], nil
+	}
+	return "", "", errors.Errorf("unable to get host and container dir")
+}
+
+func getHostSecretData(hostDir string) ([]SecretData, error) {
+	var allSecrets []SecretData
+	hostSecrets, err := readAll(hostDir, "")
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to read secrets from %q", hostDir)
+	}
+	return append(allSecrets, hostSecrets...), nil
+}
+
+// secretMounts copies the contents of the host directory to the container directory
+// and returns a list of mounts
+func secretMounts(defaultMountsPaths []string, mountLabel, containerWorkingDir string, runtimeMounts []rspec.Mount) ([]rspec.Mount, error) {
+	var mounts []rspec.Mount
+	for _, path := range defaultMountsPaths {
+		hostDir, ctrDir, err := getMountsMap(path)
+		if err != nil {
+			return nil, err
+		}
+		// skip if the hostDir path doesn't exist
+		if _, err := os.Stat(hostDir); os.IsNotExist(err) {
+			logrus.Warnf("%q doesn't exist, skipping", hostDir)
+			continue
+		}
+
+		ctrDirOnHost := filepath.Join(containerWorkingDir, ctrDir)
+		// skip if ctrDir has already been mounted by caller
+		if isAlreadyMounted(runtimeMounts, ctrDir) {
+			logrus.Warnf("%q has already been mounted; cannot override mount", ctrDir)
+			continue
+		}
+
+		if err := os.RemoveAll(ctrDirOnHost); err != nil {
+			return nil, fmt.Errorf("remove container directory failed: %v", err)
+		}
+
+		if err := os.MkdirAll(ctrDirOnHost, 0755); err != nil {
+			return nil, fmt.Errorf("making container directory failed: %v", err)
+		}
+
+		hostDir, err = resolveSymbolicLink(hostDir)
+		if err != nil {
+			return nil, err
+		}
+
+		data, err := getHostSecretData(hostDir)
+		if err != nil {
+			return nil, errors.Wrapf(err, "getting host secret data failed")
+		}
+		for _, s := range data {
+			s.SaveTo(ctrDirOnHost)
+		}
+		label.Relabel(ctrDirOnHost, mountLabel, false)
+
+		m := rspec.Mount{
+			Source:      ctrDirOnHost,
+			Destination: ctrDir,
+		}
+
+		mounts = append(mounts, m)
+	}
+	return mounts, nil
+}
+
+func isAlreadyMounted(mounts []rspec.Mount, mountPath string) bool {
+	for _, mount := range mounts {
+		if mount.Destination == mountPath {
+			return true
+		}
+	}
+	return false
+}
diff --git a/server/secrets_test.go b/server/secrets_test.go
new file mode 100644
index 00000000..e7e2a205
--- /dev/null
+++ b/server/secrets_test.go
@@ -0,0 +1,61 @@
+package server
+
+import (
+	"testing"
+)
+
+const (
+	defaultError   = "unable to get host and container dir"
+	secretDataPath = "fixtures/secret"
+	emptyPath      = "fixtures/secret/empty"
+)
+
+func TestGetMountsMap(t *testing.T) {
+	testCases := []struct {
+		Path, HostDir, CtrDir string
+		Error                 string
+	}{
+		{"", "", "", defaultError},
+		{"/tmp:/home/crio", "/tmp", "/home/crio", ""},
+		{"crio/logs:crio/logs", "crio/logs", "crio/logs", ""},
+		{"/tmp", "", "", defaultError},
+	}
+	for _, c := range testCases {
+		hostDir, ctrDir, err := getMountsMap(c.Path)
+		if hostDir != c.HostDir || ctrDir != c.CtrDir || (err != nil &&
err.Error() != c.Error) {
+			t.Errorf("expect: (%v, %v, %v) \n but got: (%v, %v, %v) \n",
+				c.HostDir, c.CtrDir, c.Error, hostDir, ctrDir, err)
+		}
+	}
+}
+
+func TestGetHostSecretData(t *testing.T) {
+	testCases := []struct {
+		Path string
+		Want []SecretData
+	}{
+		{
+			emptyPath,
+			[]SecretData{},
+		},
+		{
+			secretDataPath,
+			[]SecretData{
+				{"testDataA", []byte("secretDataA")},
+				{"testDataB", []byte("secretDataB")},
+			},
+		},
+	}
+	for _, c := range testCases {
+		if secretData, err := getHostSecretData(c.Path); err != nil {
+			t.Error(err)
+		} else {
+			for index, data := range secretData {
+				if data.Name != c.Want[index].Name || string(data.Data) != string(c.Want[index].Data) {
+					t.Errorf("expect: (%v, %v) \n but got: (%v, %v) \n",
+						c.Want[index].Name, string(c.Want[index].Data), data.Name, string(data.Data))
+				}
+			}
+		}
+	}
+}
diff --git a/server/server.go b/server/server.go
index 5139a398..b42496c7 100644
--- a/server/server.go
+++ b/server/server.go
@@ -15,11 +15,12 @@ import (

 	"github.com/cri-o/ocicni/pkg/ocicni"
 	"github.com/fsnotify/fsnotify"
-	"github.com/kubernetes-incubator/cri-o/libkpod"
-	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
+	"github.com/kubernetes-incubator/cri-o/lib"
+	"github.com/kubernetes-incubator/cri-o/lib/sandbox"
 	"github.com/kubernetes-incubator/cri-o/oci"
 	"github.com/kubernetes-incubator/cri-o/pkg/storage"
 	"github.com/kubernetes-incubator/cri-o/server/apparmor"
+	"github.com/kubernetes-incubator/cri-o/server/metrics"
 	"github.com/kubernetes-incubator/cri-o/server/seccomp"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
@@ -30,13 +31,12 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/server/streaming"
 	iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"
 	utildbus "k8s.io/kubernetes/pkg/util/dbus"
-	utilexec "k8s.io/kubernetes/pkg/util/exec"
 	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
+	utilexec "k8s.io/utils/exec"
 )

 const (
-	runtimeAPIVersion = "v1alpha1"
-	shutdownFile      = "/var/lib/crio/crio.shutdown"
+	shutdownFile = "/var/lib/crio/crio.shutdown"
 )

 func isTrue(annotaton string) bool {
@@ -53,7 +53,7 @@ type streamService struct {

 // Server implements the RuntimeService and ImageService
 type Server struct {
-	*libkpod.ContainerServer
+	*lib.ContainerServer
 	config Config

 	updateLock sync.RWMutex
@@ -190,7 +190,7 @@ func New(config *Config) (*Server, error) {
 	if err := os.MkdirAll(config.ContainerExitsDir, 0755); err != nil {
 		return nil, err
 	}
-	containerServer, err := libkpod.New(&config.Config)
+	containerServer, err := lib.New(&config.Config)
 	if err != nil {
 		return nil, err
 	}
@@ -201,11 +201,10 @@ func New(config *Config) (*Server, error) {
 	}
 	iptInterface := utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4)
 	iptInterface.EnsureChain(utiliptables.TableNAT, iptablesproxy.KubeMarkMasqChain)
-	hostportManager := hostport.NewHostportManager()
+	hostportManager := hostport.NewHostportManager(iptInterface)

 	s := &Server{
 		ContainerServer: containerServer,
-
 		netPlugin:       netPlugin,
 		hostportManager: hostportManager,
 		config:          *config,
@@ -350,6 +349,7 @@ func (s *Server) getPodSandboxFromRequest(podSandboxID string) (*sandbox.Sandbox
 // CreateMetricsEndpoint creates a /metrics endpoint
 // for prometheus monitoring
 func (s *Server) CreateMetricsEndpoint() (*http.ServeMux, error) {
+	metrics.Register()
 	mux := &http.ServeMux{}
 	mux.Handle("/metrics", prometheus.Handler())
 	return mux, nil
@@ -419,6 +419,7 @@ func (s *Server) StartExitMonitor() {
 	}()
 	if err := watcher.Add(s.config.ContainerExitsDir); err != nil {
logrus.Errorf("watcher.Add(%q) failed: %s", s.config.ContainerExitsDir, err) + close(done) } <-done } diff --git a/server/utils.go b/server/utils.go index 195942d3..0512ea4b 100644 --- a/server/utils.go +++ b/server/utils.go @@ -5,17 +5,23 @@ import ( "io" "os" "strings" + "time" "github.com/cri-o/ocicni/pkg/ocicni" - "github.com/kubernetes-incubator/cri-o/libkpod/sandbox" + "github.com/kubernetes-incubator/cri-o/lib/sandbox" + "github.com/kubernetes-incubator/cri-o/server/metrics" + "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-tools/validate" "github.com/syndtr/gocapability/capability" + pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" ) const ( // According to http://man7.org/linux/man-pages/man5/resolv.conf.5.html: // "The search list is currently limited to six domains with a total of 256 characters." maxDNSSearches = 6 + + maxLabelSize = 4096 ) func copyFile(src, dest string) error { @@ -181,3 +187,69 @@ func getOCICapabilitiesList() []string { } return caps } + +func recordOperation(operation string, start time.Time) { + metrics.CRIOOperations.WithLabelValues(operation).Inc() + metrics.CRIOOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start)) +} + +// recordError records error for metric if an error occurred. +func recordError(operation string, err error) { + if err != nil { + // TODO(runcom): handle timeout from ctx as well + metrics.CRIOOperationsErrors.WithLabelValues(operation).Inc() + } +} + +func validateLabels(labels map[string]string) error { + for k, v := range labels { + if (len(k) + len(v)) > maxLabelSize { + if len(k) > 10 { + k = k[:10] + } + return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s", maxLabelSize, k) + } + } + return nil +} + +func mergeEnvs(imageConfig *v1.Image, kubeEnvs []*pb.KeyValue) []string { + envs := []string{} + if kubeEnvs == nil && imageConfig != nil { + envs = imageConfig.Config.Env + } else { + for _, item := range kubeEnvs { + if item.GetKey() == "" { + continue + } + envs = append(envs, item.GetKey()+"="+item.GetValue()) + } + if imageConfig != nil { + for _, imageEnv := range imageConfig.Config.Env { + var found bool + parts := strings.SplitN(imageEnv, "=", 2) + if len(parts) != 2 { + continue + } + imageEnvKey := parts[0] + if imageEnvKey == "" { + continue + } + for _, kubeEnv := range envs { + kubeEnvKey := strings.SplitN(kubeEnv, "=", 2)[0] + if kubeEnvKey == "" { + continue + } + if imageEnvKey == kubeEnvKey { + found = true + break + } + } + if !found { + envs = append(envs, imageEnv) + } + } + } + } + return envs +} diff --git a/server/utils_test.go b/server/utils_test.go new file mode 100644 index 00000000..f943c2ea --- /dev/null +++ b/server/utils_test.go @@ -0,0 +1,143 @@ +package server + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/opencontainers/image-spec/specs-go/v1" + pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" +) + +const ( + defaultDNSPath = "/etc/resolv.conf" + testDNSPath = "fixtures/resolv_test.conf" + dnsPath = "fixtures/resolv.conf" +) + +func TestParseDNSOptions(t *testing.T) { + testCases := []struct { + Servers, Searches, Options []string + Path string + Want string + }{ + { + []string{}, + []string{}, + []string{}, + testDNSPath, defaultDNSPath, + }, + { + []string{"cri-o.io", "github.com"}, + []string{"192.30.253.113", "192.30.252.153"}, + []string{"timeout:5", "attempts:3"}, + testDNSPath, dnsPath, + }, + } + + for _, c := range testCases { + if err := 
parseDNSOptions(c.Servers, c.Searches,
+			c.Options, c.Path); err != nil {
+			t.Error(err)
+		}
+
+		expect, _ := ioutil.ReadFile(c.Want)
+		result, _ := ioutil.ReadFile(c.Path)
+		if string(expect) != string(result) {
+			t.Errorf("expect %v: \n but got : %v", string(expect), string(result))
+		}
+		os.Remove(c.Path)
+	}
+}
+
+func TestSysctlsFromPodAnnotations(t *testing.T) {
+	testCases := []struct {
+		Annotations   map[string]string
+		SafeSysctls   []Sysctl
+		UnsafeSysctls []Sysctl
+	}{
+		{
+			map[string]string{
+				"foo-":                  "bar",
+				SysctlsPodAnnotationKey: "kernel.shmmax=100000000,safe=20000000",
+			},
+			[]Sysctl{
+				{"kernel.shmmax", "100000000"},
+				{"safe", "20000000"},
+			},
+			[]Sysctl{},
+		},
+		{
+			map[string]string{
+				UnsafeSysctlsPodAnnotationKey: "kernel.shmmax=10,unsafe=20",
+			},
+			[]Sysctl{},
+			[]Sysctl{
+				{"kernel.shmmax", "10"},
+				{"unsafe", "20"},
+			},
+		},
+		{
+			map[string]string{
+				"bar..":                       "42",
+				SysctlsPodAnnotationKey:       "kernel.shmmax=20000000,safe=40000000",
+				UnsafeSysctlsPodAnnotationKey: "kernel.shmmax=10,unsafe=20",
+			},
+			[]Sysctl{
+				{"kernel.shmmax", "20000000"},
+				{"safe", "40000000"},
+			},
+			[]Sysctl{
+				{"kernel.shmmax", "10"},
+				{"unsafe", "20"},
+			},
+		},
+	}
+
+	for _, c := range testCases {
+		safe, unsafe, err := SysctlsFromPodAnnotations(c.Annotations)
+		if err != nil {
+			t.Error(err)
+		}
+		for index, sysctl := range safe {
+			if sysctl.Name != c.SafeSysctls[index].Name || sysctl.Value != c.SafeSysctls[index].Value {
+				t.Errorf("Expect safe: %v, but got: %v\n", c.SafeSysctls[index], sysctl)
+			}
+		}
+		for index, sysctl := range unsafe {
+			if sysctl.Name != c.UnsafeSysctls[index].Name || sysctl.Value != c.UnsafeSysctls[index].Value {
+				t.Errorf("Expect unsafe: %v, but got: %v\n", c.UnsafeSysctls[index], sysctl)
+			}
+		}
+	}
+}
+
+func TestMergeEnvs(t *testing.T) {
+	configImage := &v1.Image{
+		Config: v1.ImageConfig{
+			Env: []string{"VAR1=1", "VAR2=2"},
+		},
+	}
+
+	configKube := []*pb.KeyValue{
+		{
+			Key:   "VAR2",
+			Value: "3",
+		},
+		{
+			Key:   "VAR3",
+			Value: "3",
+		},
+	}
+
+	mergedEnvs := mergeEnvs(configImage, configKube)
+
+	if len(mergedEnvs) != 3 {
+		t.Fatalf("Expected 3 env vars, VAR1=1, VAR2=3 and VAR3=3, found %d", len(mergedEnvs))
+	}
+	for _, env := range mergedEnvs {
+		if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" {
+			t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env)
+		}
+	}
+}
diff --git a/server/version.go b/server/version.go
index d55cd046..74f4799b 100644
--- a/server/version.go
+++ b/server/version.go
@@ -1,29 +1,35 @@
 package server
 
 import (
+	"time"
+
+	"github.com/kubernetes-incubator/cri-o/version"
 	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
-// Version returns the runtime name, runtime version and runtime API version
-func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (*pb.VersionResponse, error) {
-
-	runtimeVersion, err := s.Runtime().Version()
-	if err != nil {
-		return nil, err
-	}
-
+const (
+	// kubeAPIVersion is the API version of Kubernetes.
+	// TODO: Track upstream code. For now it expects 0.1.0
+	kubeAPIVersion = "0.1.0"
+	// containerName is the name prepended in kubectl describe->Container ID:
+	// cri-o://
+	containerName = "cri-o"
+	runtimeAPIVersion = "v1alpha1"
+)
 
-	// taking const address
-	rav := runtimeAPIVersion
-	runtimeName := s.Runtime().Name()
+// Version returns the runtime name, runtime version and runtime API version
+func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (resp *pb.VersionResponse, err error) {
+	const operation = "version"
+	defer func(start time.Time) {
+		recordOperation(operation, start)
+		recordError(operation, err)
+	}(time.Now())
 
 	return &pb.VersionResponse{
-		Version:           version,
-		RuntimeName:       runtimeName,
-		RuntimeVersion:    runtimeVersion,
-		RuntimeApiVersion: rav,
+		Version:           kubeAPIVersion,
+		RuntimeName:       containerName,
+		RuntimeVersion:    version.Version,
+		RuntimeApiVersion: runtimeAPIVersion,
 	}, nil
 }
diff --git a/test/README.md b/test/README.md
index 1dd2e3c7..1d1742b0 100644
--- a/test/README.md
+++ b/test/README.md
@@ -41,11 +41,12 @@ You will also need to install the [CNI](https://github.com/containernetworking/c
 the default pod test template runs without host networking:
 
 ```
-$ go get github.com/containernetworking/cni
-$ cd "$GOPATH/src/github.com/containernetworking/cni"
-$ git checkout -q d4bbce1865270cd2d2be558d6a23e63d314fe769
-$ ./build.sh \
-$ mkdir -p /opt/cni/bin \
+$ cd "$GOPATH/src/github.com/containernetworking"
+$ git clone https://github.com/containernetworking/plugins.git
+$ cd plugins
+$ git checkout -q dcf7368eeab15e2affc6256f0bb1e84dd46a34de
+$ ./build.sh
+$ mkdir -p /opt/cni/bin
 $ cp bin/* /opt/cni/bin/
 ```
 
@@ -69,17 +70,16 @@ Tests on the host will run with `runc` as the default
 runtime. However you can
 select other OCI compatible runtimes by setting the `RUNTIME` environment
 variable.
 
-For example one could use the [Clear Containers](https://github.com/01org/cc-oci-runtime/wiki/Installation)
+For example, one could use the [Clear Containers](https://github.com/clearcontainers/runtime)
 runtime instead of `runc`:
 
 ```
-make localintegration RUNTIME=cc-oci-runtime
+make localintegration RUNTIME=cc-runtime
 ```
 
 ## Writing integration tests
 
-[Helper functions]
-(https://github.com/kubernetes-incubator/crio/blob/master/test/helpers.bash)
+[Helper functions](https://github.com/kubernetes-incubator/cri-o/blob/master/test/helpers.bash)
 are provided in order to facilitate writing tests. 
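As a quick illustration (a sketch only, built from the `start_crio`, `cleanup_pods`, and `stop_crio` helpers and the `crictl` flow adopted throughout this change; the test name is hypothetical), a minimal new test could look like:

```sh
@test "example: pod lifecycle round trip" {
	start_crio
	# run a sandbox from the standard test data and capture its ID
	run crictl runs "$TESTDATA"/sandbox_config.json
	echo "$output"
	[ "$status" -eq 0 ]
	pod_id="$output"
	# stop and remove the sandbox again
	run crictl stops "$pod_id"
	echo "$output"
	[ "$status" -eq 0 ]
	run crictl rms "$pod_id"
	echo "$output"
	[ "$status" -eq 0 ]
	cleanup_pods
	stop_crio
}
```

The existing `crictl runtimeversion` test shown below follows the same shape: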
```sh @@ -97,9 +97,9 @@ function teardown() { cleanup_test } -@test "crioctl runtimeversion" { +@test "crictl runtimeversion" { start_crio - crioctl runtimeversion + crictl runtimeversion [ "$status" -eq 0 ] } diff --git a/test/apparmor.bats b/test/apparmor.bats index babfb170..65e853e2 100644 --- a/test/apparmor.bats +++ b/test/apparmor.bats @@ -19,20 +19,18 @@ function teardown() { sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname1": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor1.json - run crioctl pod run --name apparmor1 --config "$TESTDIR"/apparmor1.json + run crictl runs "$TESTDIR"/apparmor1.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --name testname1 --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/apparmor1.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" touch test.txt + run crictl exec --sync "$ctr_id" touch test.txt echo "$output" [ "$status" -eq 0 ] - cleanup_ctrs cleanup_pods stop_crio @@ -52,16 +50,15 @@ function teardown() { sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname2": "apparmor-test-deny-write"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor2.json - run crioctl pod run --name apparmor2 --config "$TESTDIR"/apparmor2.json + run crictl runs "$TESTDIR"/apparmor2.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/apparmor2.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" touch test.txt + run crictl exec --sync "$ctr_id" touch test.txt echo "$output" [ "$status" -ne 0 ] [[ "$output" =~ "Permission denied" ]] @@ -86,16 +83,15 @@ function teardown() { sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname3": "apparmor-test-deny-write"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor3.json - run crioctl pod run --name apparmor3 --config "$TESTDIR"/apparmor3.json + run crictl runs "$TESTDIR"/apparmor3.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --name testname3 --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/apparmor3.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" touch test.txt + run crictl exec --sync "$ctr_id" touch test.txt echo "$output" [ "$status" -ne 0 ] [[ "$output" =~ "Permission denied" ]] @@ -119,16 +115,15 @@ function teardown() { sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname4": "not-exists"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor4.json - run crioctl pod run --name apparmor4 --config "$TESTDIR"/apparmor4.json + run crictl runs "$TESTDIR"/apparmor4.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --name testname4 --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/apparmor4.json echo "$output" [ "$status" -ne 0 ] [[ "$output" =~ "Creating container failed" ]] - cleanup_ctrs cleanup_pods stop_crio @@ 
-148,20 +143,18 @@ function teardown() { sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname5": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor5.json - run crioctl pod run --name apparmor5 --config "$TESTDIR"/apparmor5.json + run crictl runs "$TESTDIR"/apparmor5.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --name testname5 --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/apparmor5.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" touch test.txt + run crictl exec --sync "$ctr_id" touch test.txt echo "$output" [ "$status" -eq 0 ] - cleanup_ctrs cleanup_pods stop_crio diff --git a/test/cgroups.bats b/test/cgroups.bats index cbe27be4..03990f1f 100644 --- a/test/cgroups.bats +++ b/test/cgroups.bats @@ -11,27 +11,27 @@ function teardown() { skip "pids cgroup controller is not mounted" fi PIDS_LIMIT=1234 start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" pids_limit_config=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin); obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)') echo "$pids_limit_config" > "$TESTDIR"/container_pids_limit.json - run crioctl ctr create --config "$TESTDIR"/container_pids_limit.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_pids_limit.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" cat /sys/fs/cgroup/pids/pids.max + run crictl exec --sync "$ctr_id" cat /sys/fs/cgroup/pids/pids.max echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "1234" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs diff --git a/test/command.bats b/test/command.bats new file mode 100644 index 00000000..86e58f9d --- /dev/null +++ b/test/command.bats @@ -0,0 +1,12 @@ +#!/usr/bin/env bats + +load helpers + +@test "crio commands" { + run ${CRIO_BINARY} --config /dev/null config > /dev/null + echo "$output" + [ "$status" -eq 0 ] + run ${CRIO_BINARY} badoption > /dev/null + echo "$output" + [ "$status" -ne 0 ] +} diff --git a/test/ctr.bats b/test/ctr.bats index 79eae2a3..5f37c708 100644 --- a/test/ctr.bats +++ b/test/ctr.bats @@ -8,31 +8,31 @@ function teardown() { @test "ctr not found correct error message" { start_crio - run crioctl ctr status --id randomid + run crictl inspect "container_not_exist" echo "$output" [ "$status" -eq 1 ] - [[ "$output" =~ "container with ID starting with randomid not found" ]] stop_crio } @test "ctr termination reason Completed" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" 
+ run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run sleep 5 + run crictl inspect --output yaml "$ctr_id" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Reason: Completed" ]] + [[ "$output" =~ "reason: Completed" ]] cleanup_ctrs cleanup_pods @@ -41,23 +41,24 @@ function teardown() { @test "ctr termination reason Error" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" errorconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["command"] = ["false"]; json.dump(obj, sys.stdout)') echo "$errorconfig" > "$TESTDIR"/container_config_error.json - run crioctl ctr create --config "$TESTDIR"/container_config_error.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_config_error.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run sleep 5 + run crictl inspect --output yaml "$ctr_id" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Reason: Error" ]] + [[ "$output" =~ "reason: Error" ]] cleanup_ctrs cleanup_pods @@ -66,24 +67,24 @@ function teardown() { @test "ctr remove" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr remove --id "$ctr_id" + run crictl rm "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -93,65 +94,69 @@ function teardown() { @test "ctr lifecycle" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod list + run crictl sandboxes --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + [[ "$output" == "$pod_id" ]] + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + [[ "$output" == "$ctr_id" ]] + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" + [[ "$output" == "$ctr_id" ]] + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run crictl inspect 
"$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl ctr remove --id "$ctr_id" + [[ "$output" == "$ctr_id" ]] + run crictl rm "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod list + run crictl sandboxes --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl ctr list + [[ "$output" == "$pod_id" ]] + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + [[ "$output" == "" ]] + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod list - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr list + run crictl sandboxes --quiet echo "$output" [ "$status" -eq 0 ] + [[ "$output" == "" ]] cleanup_ctrs cleanup_pods stop_crio @@ -159,31 +164,28 @@ function teardown() { @test "ctr logging" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod list - echo "$output" - [ "$status" -eq 0 ] # Create a new container. newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json) cp "$TESTDATA"/container_config_logging.json "$newconfig" sed -i 's|"%shellcommand%"|"echo here is some output \&\& echo and some from stderr >\&2"|' "$newconfig" - run crioctl ctr create --config "$newconfig" --pod "$pod_id" + run crictl create "$pod_id" "$newconfig" "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" # Ignore errors on stop. - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" [ "$status" -eq 0 ] - run crioctl ctr remove --id "$ctr_id" + run crictl rm "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -191,13 +193,13 @@ function teardown() { logpath="$DEFAULT_LOG_PATH/$pod_id/$ctr_id.log" [ -f "$logpath" ] echo "$logpath :: $(cat "$logpath")" - grep -E "^[^\n]+ stdout here is some output$" "$logpath" - grep -E "^[^\n]+ stderr and some from stderr$" "$logpath" + grep -E "^[^\n]+ stdout F here is some output$" "$logpath" + grep -E "^[^\n]+ stderr F and some from stderr$" "$logpath" - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -208,32 +210,29 @@ function teardown() { @test "ctr logging [tty=true]" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod list - echo "$output" - [ "$status" -eq 0 ] # Create a new container. 
newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json) cp "$TESTDATA"/container_config_logging.json "$newconfig" sed -i 's|"%shellcommand%"|"echo here is some output"|' "$newconfig" sed -i 's|"tty": false,|"tty": true,|' "$newconfig" - run crioctl ctr create --config "$newconfig" --pod "$pod_id" + run crictl create "$pod_id" "$newconfig" "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" # Ignore errors on stop. - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" [ "$status" -eq 0 ] - run crioctl ctr remove --id "$ctr_id" + run crictl rm "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -241,12 +240,12 @@ function teardown() { logpath="$DEFAULT_LOG_PATH/$pod_id/$ctr_id.log" [ -f "$logpath" ] echo "$logpath :: $(cat "$logpath")" - grep --binary -P "^[^\n]+ stdout here is some output\x0d$" "$logpath" + grep --binary -P "^[^\n]+ stdout F here is some output\x0d$" "$logpath" - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -257,29 +256,26 @@ function teardown() { @test "ctr log max" { LOG_SIZE_MAX_LIMIT=10000 start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod list - echo "$output" - [ "$status" -eq 0 ] # Create a new container. newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json) cp "$TESTDATA"/container_config_logging.json "$newconfig" sed -i 's|"%shellcommand%"|"for i in $(seq 250); do echo $i; done"|' "$newconfig" - run crioctl ctr create --config "$newconfig" --pod "$pod_id" + run crictl create "$pod_id" "$newconfig" "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] sleep 6 - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" [ "$status" -eq 0 ] - run crioctl ctr remove --id "$ctr_id" + run crictl rm "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -290,10 +286,55 @@ function teardown() { len=$(wc -l "$logpath" | awk '{print $1}') [ $len -lt 250 ] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr partial line logging" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + + # Create a new container. + newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json) + cp "$TESTDATA"/container_config_logging.json "$newconfig" + sed -i 's|"%shellcommand%"|"echo -n hello"|' "$newconfig" + run crictl create "$pod_id" "$newconfig" "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl stop "$ctr_id" + echo "$output" + # Ignore errors on stop. + run crictl inspect "$ctr_id" + [ "$status" -eq 0 ] + run crictl rm "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + # Check that the output is what we expect. 
+ logpath="$DEFAULT_LOG_PATH/$pod_id/$ctr_id.log" + [ -f "$logpath" ] + echo "$logpath :: $(cat "$logpath")" + grep -E "^[^\n]+ stdout P hello$" "$logpath" + + run crictl stops "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -305,22 +346,24 @@ function teardown() { # regression test for #127 @test "ctrs status for a pod" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] + ctr_id="$output" - run crioctl ctr list --quiet + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] - [[ "${output}" != "" ]] + [[ "$output" != "" ]] + [[ "$output" == "$ctr_id" ]] printf '%s\n' "$output" | while IFS= read -r id do - run crioctl ctr status --id "$id" + run crictl inspect "$id" echo "$output" [ "$status" -eq 0 ] done @@ -331,105 +374,101 @@ function teardown() { } @test "ctr list filtering" { + # start 3 redis sandbox + # pod1 ctr1 create & start + # pod2 ctr2 create + # pod3 ctr3 create & start & stop start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod1 + run crictl runs "$TESTDATA"/sandbox1_config.json echo "$output" [ "$status" -eq 0 ] pod1_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod1_id" + run crictl create "$pod1_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox1_config.json echo "$output" [ "$status" -eq 0 ] ctr1_id="$output" - run crioctl ctr start --id "$ctr1_id" + run crictl start "$ctr1_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod2 + run crictl runs "$TESTDATA"/sandbox2_config.json echo "$output" [ "$status" -eq 0 ] pod2_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod2_id" + run crictl create "$pod2_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox2_config.json echo "$output" [ "$status" -eq 0 ] ctr2_id="$output" - run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod3 + run crictl runs "$TESTDATA"/sandbox3_config.json echo "$output" [ "$status" -eq 0 ] pod3_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod3_id" + run crictl create "$pod3_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox3_config.json echo "$output" [ "$status" -eq 0 ] ctr3_id="$output" - run crioctl ctr start --id "$ctr3_id" + run crictl start "$ctr3_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr3_id" + run crictl stop "$ctr3_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr list --id "$ctr1_id" --quiet + + run crictl ps --id "$ctr1_id" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - run crioctl ctr list --id "${ctr1_id:0:4}" --quiet + [[ "$output" == "$ctr1_id" ]] + run crictl ps --id "${ctr1_id:0:4}" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - run crioctl ctr list --id "$ctr2_id" --pod "$pod2_id" --quiet + [[ "$output" == "$ctr1_id" ]] + run crictl ps --id "$ctr2_id" --sandbox "$pod2_id" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr2_id" ]] - run crioctl ctr list --id 
"$ctr2_id" --pod "$pod3_id" --quiet + [[ "$output" == "$ctr2_id" ]] + run crictl ps --id "$ctr2_id" --sandbox "$pod3_id" --quiet echo "$output" [ "$status" -eq 0 ] [[ "$output" == "" ]] - run crioctl ctr list --state created --quiet + run crictl ps --state created --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr2_id" ]] - run crioctl ctr list --state running --quiet + [[ "$output" == "$ctr2_id" ]] + run crictl ps --state running --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - run crioctl ctr list --state stopped --quiet + [[ "$output" == "$ctr1_id" ]] + run crictl ps --state stopped --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr3_id" ]] - run crioctl ctr list --pod "$pod1_id" --quiet + [[ "$output" == "$ctr3_id" ]] + run crictl ps --sandbox "$pod1_id" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - run crioctl ctr list --pod "$pod2_id" --quiet + [[ "$output" == "$ctr1_id" ]] + run crictl ps --sandbox "$pod2_id" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr2_id" ]] - run crioctl ctr list --pod "$pod3_id" --quiet + [[ "$output" == "$ctr2_id" ]] + run crictl ps --sandbox "$pod3_id" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr3_id" ]] - run crioctl pod stop --id "$pod1_id" + [[ "$output" == "$ctr3_id" ]] + run crictl stops "$pod1_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod1_id" + run crictl rms "$pod1_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod stop --id "$pod2_id" + run crictl stops "$pod2_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod2_id" + run crictl rms "$pod2_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod stop --id "$pod3_id" + run crictl stops "$pod3_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod3_id" + run crictl rms "$pod3_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -438,49 +477,64 @@ function teardown() { } @test "ctr list label filtering" { + # start a pod with 3 containers + # ctr1 with labels: group=test container=redis version=v1.0.0 + # ctr2 with labels: group=test container=redis version=v1.0.0 + # ctr3 with labels: group=test container=redis version=v1.1.0 start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr1 --label "a=b" --label "c=d" --label "e=f" + + ctrconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["metadata"]["name"] = "ctr1";obj["labels"]["group"] = "test";obj["labels"]["name"] = "ctr1";obj["labels"]["version"] = "v1.0.0"; json.dump(obj, sys.stdout)') + echo "$ctrconfig" > "$TESTDATA"/labeled_container_redis.json + run crictl create "$pod_id" "$TESTDATA"/labeled_container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr1_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr2 --label "a=b" --label "c=d" + + ctrconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["metadata"]["name"] = "ctr2";obj["labels"]["group"] = "test";obj["labels"]["name"] = 
"ctr2";obj["labels"]["version"] = "v1.0.0"; json.dump(obj, sys.stdout)') + echo "$ctrconfig" > "$TESTDATA"/labeled_container_redis.json + run crictl create "$pod_id" "$TESTDATA"/labeled_container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr2_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr3 --label "a=b" + + ctrconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["metadata"]["name"] = "ctr3";obj["labels"]["group"] = "test";obj["labels"]["name"] = "ctr3";obj["labels"]["version"] = "v1.1.0"; json.dump(obj, sys.stdout)') + echo "$ctrconfig" > "$TESTDATA"/labeled_container_redis.json + run crictl create "$pod_id" "$TESTDATA"/labeled_container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr3_id="$output" - run crioctl ctr list --label "tier=backend" --label "a=b" --label "c=d" --label "e=f" --quiet + + run crictl ps --label "group=test" --label "name=ctr1" --label "version=v1.0.0" --quiet echo "$output" [ "$status" -eq 0 ] - [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - run crioctl ctr list --label "tier=frontend" --quiet + [[ "$output" == "$ctr1_id" ]] + run crictl ps --label "group=production" --quiet echo "$output" [ "$status" -eq 0 ] [[ "$output" == "" ]] - run crioctl ctr list --label "a=b" --label "c=d" --quiet + run crictl ps --label "group=test" --label "version=v1.0.0" --quiet echo "$output" [ "$status" -eq 0 ] [[ "$output" != "" ]] - [[ "$output" =~ "$ctr1_id" ]] - [[ "$output" =~ "$ctr2_id" ]] - run crioctl ctr list --label "a=b" --quiet + [[ "$output" =~ "$ctr1_id" ]] + [[ "$output" =~ "$ctr2_id" ]] + [[ "$output" != "$ctr3_id" ]] + run crictl ps --label "group=test" --quiet echo "$output" [ "$status" -eq 0 ] [[ "$output" != "" ]] [[ "$output" =~ "$ctr1_id" ]] [[ "$output" =~ "$ctr2_id" ]] [[ "$output" =~ "$ctr3_id" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -490,23 +544,23 @@ function teardown() { @test "ctr metadata in list & status" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr list --id "$ctr_id" + run crictl ps --id "$ctr_id" --output yaml echo "$output" [ "$status" -eq 0 ] # TODO: expected value should not hard coded here - [[ "$output" =~ "Name: container1" ]] - [[ "$output" =~ "Attempt: 1" ]] + [[ "$output" =~ "name: container1" ]] + [[ "$output" =~ "attempt: 1" ]] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] # TODO: expected value should not hard coded here @@ -520,21 +574,21 @@ function teardown() { @test "ctr execsync conflicting with conmon flags parsing" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json 
"$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" sh -c "echo hello world" + run crictl exec --sync "$ctr_id" sh -c "echo hello world" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "hello world" ]] + [[ "$output" == "hello world" ]] cleanup_ctrs cleanup_pods stop_crio @@ -542,28 +596,29 @@ function teardown() { @test "ctr execsync" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" echo HELLO + run crictl exec --sync "$ctr_id" echo HELLO echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "HELLO" ]] - run crioctl ctr execsync --id "$ctr_id" --timeout 1 sleep 10 + [[ "$output" == "HELLO" ]] + run crictl exec --sync --timeout 1 "$ctr_id" sleep 3 echo "$output" [[ "$output" =~ "command timed out" ]] - run crioctl pod stop --id "$pod_id" + [ "$status" -ne 0 ] + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -573,25 +628,50 @@ function teardown() { @test "ctr device add" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis_device.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis_device.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" ls /dev/mynull + run crictl exec --sync "$ctr_id" ls /dev/mynull echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "/dev/mynull" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr hostname env" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl exec --sync "$ctr_id" env + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "HOSTNAME" ]] + run crictl stops "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -601,18 +681,18 @@ function teardown() { @test "ctr execsync failure" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run 
crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" doesnotexist + run crictl exec --sync "$ctr_id" doesnotexist echo "$output" [ "$status" -ne 0 ] @@ -623,18 +703,18 @@ function teardown() { @test "ctr execsync exit code" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" false + run crictl exec --sync "$ctr_id" false echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "Exit code: 1" ]] @@ -645,39 +725,39 @@ function teardown() { @test "ctr execsync std{out,err}" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" echo hello0 stdout + run crictl exec --sync "$ctr_id" echo hello0 stdout echo "$output" [ "$status" -eq 0 ] - [[ "$output" == *"$(printf "Stdout:\nhello0 stdout")"* ]] + [[ "$output" =~ "hello0 stdout" ]] stderrconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "runcom/stderr-test"; obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)') echo "$stderrconfig" > "$TESTDIR"/container_config_stderr.json - run crioctl ctr create --config "$TESTDIR"/container_config_stderr.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_config_stderr.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" stderr + run crictl exec --sync "$ctr_id" stderr echo "$output" [ "$status" -eq 0 ] - [[ "$output" == *"$(printf "Stderr:\nthis goes to stderr")"* ]] - run crioctl pod stop --id "$pod_id" + [[ "$output" =~ "this goes to stderr" ]] + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -687,21 +767,21 @@ function teardown() { @test "ctr stop idempotent" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id 
"$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -712,13 +792,13 @@ function teardown() { @test "ctr caps drop" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" capsconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["linux"]["security_context"]["capabilities"] = {u"add_capabilities": [], u"drop_capabilities": [u"mknod", u"kill", u"sys_chroot", u"setuid", u"setgid"]}; json.dump(obj, sys.stdout)') echo "$capsconfig" > "$TESTDIR"/container_config_caps.json - run crioctl ctr create --config "$TESTDIR"/container_config_caps.json --pod "$pod_id" + run crictl create "$TESTDIR"/container_config_caps.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] @@ -729,16 +809,16 @@ function teardown() { @test "run ctr with image with Config.Volumes" { start_crio - run crioctl image pull gcr.io/k8s-testimages/redis:e2e + run crictl pull gcr.io/k8s-testimages/redis:e2e echo "$output" [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" volumesconfig=$(cat "$TESTDATA"/container_redis.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "gcr.io/k8s-testimages/redis:e2e"; obj["args"] = []; json.dump(obj, sys.stdout)') echo "$volumesconfig" > "$TESTDIR"/container_config_volumes.json - run crioctl ctr create --config "$TESTDIR"/container_config_volumes.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_config_volumes.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] @@ -752,29 +832,36 @@ function teardown() { skip "travis container tests don't support testing OOM" fi start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" oomconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "mrunalp/oom"; obj["linux"]["resources"]["memory_limit_in_bytes"] = 5120000; obj["command"] = ["/oom"]; json.dump(obj, sys.stdout)') echo "$oomconfig" > "$TESTDIR"/container_config_oom.json - run crioctl ctr create --config "$TESTDIR"/container_config_oom.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_config_oom.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] # Wait for container to OOM - run sleep 100 - run crioctl ctr status --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] + attempt=0 + while [ $attempt -le 100 ]; do + attempt=$((attempt+1)) + run crictl inspect --output yaml "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + if [[ "$output" =~ "OOMKilled" ]]; then + break + fi + sleep 10 + done [[ "$output" =~ "OOMKilled" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -784,37 
+871,34 @@ function teardown() { @test "ctr /etc/resolv.conf rw/ro mode" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config_resolvconf.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config_resolvconf.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Status: CONTAINER_EXITED" ]] + [[ "$output" =~ "State: CONTAINER_EXITED" ]] [[ "$output" =~ "Exit Code: 0" ]] - [[ "$output" =~ "Reason: Completed" ]] - run crioctl ctr create --name roctr --config "$TESTDATA"/container_config_resolvconf_ro.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config_resolvconf_ro.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Status: CONTAINER_EXITED" ]] - [[ "$output" =~ "Exit Code: 1" ]] - [[ "$output" =~ "Reason: Error" ]] + [[ "$output" =~ "State: CONTAINER_EXITED" ]] cleanup_ctrs cleanup_pods @@ -823,19 +907,19 @@ function teardown() { @test "ctr create with non-existent command" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" newconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["command"] = ["nonexistent"]; json.dump(obj, sys.stdout)') echo "$newconfig" > "$TESTDIR"/container_nonexistent.json - run crioctl ctr create --config "$TESTDIR"/container_nonexistent.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_nonexistent.json "$TESTDATA"/sandbox_config.json [ "$status" -ne 0 ] [[ "$output" =~ "executable file not found" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -845,22 +929,163 @@ function teardown() { @test "ctr create with non-existent command [tty]" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" newconfig=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["command"] = ["nonexistent"]; obj["tty"] = True; json.dump(obj, sys.stdout)') echo "$newconfig" > "$TESTDIR"/container_nonexistent.json - run crioctl ctr create --config "$TESTDIR"/container_nonexistent.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_nonexistent.json "$TESTDATA"/sandbox_config.json [ "$status" -ne 0 ] [[ "$output" =~ "executable file not found" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs cleanup_pods stop_crio } + +@test "ctr 
update resources" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/memory/memory.limit_in_bytes" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "209715200" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.shares" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "512" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.cfs_period_us" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "10000" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "20000" ]] + + run crictl update --memory 524288000 --cpu-period 20000 --cpu-quota 10000 --cpu-share 256 "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/memory/memory.limit_in_bytes" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "524288000" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.shares" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "256" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.cfs_period_us" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "20000" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "10000" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr correctly setup working directory" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + notexistcwd=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["working_dir"] = "/thisshouldntexistatall"; json.dump(obj, sys.stdout)') + echo "$notexistcwd" > "$TESTDIR"/container_cwd_notexist.json + run crictl create "$pod_id" "$TESTDIR"/container_cwd_notexist.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + filecwd=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["working_dir"] = "/etc/passwd"; obj["metadata"]["name"] = "container2"; json.dump(obj, sys.stdout)') + echo "$filecwd" > "$TESTDIR"/container_cwd_file.json + run crictl create "$pod_id" "$TESTDIR"/container_cwd_file.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -ne 0 ] + ctr_id="$output" + [[ "$output" =~ "not a directory" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr execsync conflicting with conmon env" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis_env_custom.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl exec "$ctr_id" env + echo "$output" + echo "$status" + [ "$status" -eq 0 ] + [[ "$output" =~ "acustompathinpath" ]] + run crictl exec --sync "$ctr_id" env + echo "$output" + [ "$status" -eq 0 ] + [[ 
"$output" =~ "acustompathinpath" ]] + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "ctr resources" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpuset/cpuset.cpus" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "0-1" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpuset/cpuset.mems" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "0" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} diff --git a/test/ctr_seccomp.bats b/test/ctr_seccomp.bats new file mode 100644 index 00000000..2dae19d1 --- /dev/null +++ b/test/ctr_seccomp.bats @@ -0,0 +1,221 @@ +#!/usr/bin/env bats + +load helpers + +function teardown() { + cleanup_test +} + +# 1. test running with ctr unconfined +# test that we can run with a syscall which would be otherwise blocked +@test "ctr seccomp profiles unconfined" { + # this test requires seccomp, so skip this test if seccomp is not enabled. + enabled=$(is_seccomp_enabled) + if [[ "$enabled" -eq 0 ]]; then + skip "skip this test since seccomp is not enabled." + fi + + sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json + sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json + sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json + + start_crio "$TESTDIR"/seccomp_profile1.json + + sed -e 's/%VALUE%/unconfined/g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp1.json + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDIR"/seccomp1.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl exec --sync "$ctr_id" chmod 777 . + echo "$output" + [ "$status" -eq 0 ] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +# 2. test running with ctr runtime/default +# test that we cannot run with a syscall blocked by the default seccomp profile +@test "ctr seccomp profiles runtime/default" { + # this test requires seccomp, so skip this test if seccomp is not enabled. + enabled=$(is_seccomp_enabled) + if [[ "$enabled" -eq 0 ]]; then + skip "skip this test since seccomp is not enabled." + fi + + sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json + sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json + sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json + + start_crio "$TESTDIR"/seccomp_profile1.json + + sed -e 's/%VALUE%/runtime\/default/g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp2.json + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$TESTDIR"/seccomp2.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl exec --sync "$ctr_id" chmod 777 . + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "Exit code: 1" ]] + [[ "$output" =~ "Operation not permitted" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +# 3. 
+
+# 3. test running with ctr unconfined and profile empty
+# test that we can run with a syscall which would be otherwise blocked
+@test "ctr seccomp profiles unconfined by empty field" {
+	# this test requires seccomp, so skip this test if seccomp is not enabled.
+	enabled=$(is_seccomp_enabled)
+	if [[ "$enabled" -eq 0 ]]; then
+		skip "skip this test since seccomp is not enabled."
+	fi
+
+	sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json
+	sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+	sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+	start_crio "$TESTDIR"/seccomp_profile1.json
+
+	sed -e 's/%VALUE%//g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp1.json
+	run crictl runs "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	pod_id="$output"
+	run crictl create "$pod_id" "$TESTDIR"/seccomp1.json "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	ctr_id="$output"
+	run crictl start "$ctr_id"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run crictl exec --sync "$ctr_id" chmod 777 .
+	echo "$output"
+	[ "$status" -eq 0 ]
+
+	cleanup_ctrs
+	cleanup_pods
+	stop_crio
+}
+
+# 4. test running with ctr wrong profile name
+@test "ctr seccomp profiles wrong profile name" {
+	# this test requires seccomp, so skip this test if seccomp is not enabled.
+	enabled=$(is_seccomp_enabled)
+	if [[ "$enabled" -eq 0 ]]; then
+		skip "skip this test since seccomp is not enabled."
+	fi
+
+	sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json
+	sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+	sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+	start_crio "$TESTDIR"/seccomp_profile1.json
+
+	sed -e 's/%VALUE%/wontwork/g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp1.json
+	run crictl runs "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	pod_id="$output"
+	run crictl create "$pod_id" "$TESTDIR"/seccomp1.json "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[[ "$status" -ne 0 ]]
+	[[ "$output" =~ "unknown seccomp profile option:" ]]
+	[[ "$output" =~ "wontwork" ]]
+
+	cleanup_ctrs
+	cleanup_pods
+	stop_crio
+}
+
+# 5. test running with ctr localhost/profile_name
+@test "ctr seccomp profiles localhost/profile_name" {
+	# this test requires seccomp, so skip this test if seccomp is not enabled.
+	enabled=$(is_seccomp_enabled)
+	if [[ "$enabled" -eq 0 ]]; then
+		skip "skip this test since seccomp is not enabled."
+	fi
+
+	start_crio
+
+	sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json
+	sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+	sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+	sed -e 's@%VALUE%@localhost/'"$TESTDIR"'/seccomp_profile1.json@g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp1.json
+	run crictl runs "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	pod_id="$output"
+	run crictl create "$pod_id" "$TESTDIR"/seccomp1.json "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	ctr_id="$output"
+	run crictl start "$ctr_id"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run crictl exec --sync "$ctr_id" chmod 777 .
+	echo "$output"
+	[ "$status" -eq 0 ]
+	[[ "$output" =~ "Exit code: 1" ]]
+	[[ "$output" =~ "Operation not permitted" ]]
+
+	cleanup_ctrs
+	cleanup_pods
+	stop_crio
+}
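+
+# Note: the "localhost/" prefix tells CRI-O to load the seccomp profile from
+# a file on the node, so the substitution above expands to something like
+#
+#   localhost//tmp/tmp.XXXXXX/seccomp_profile1.json
+#
+# (the doubled slash is harmless; everything after "localhost/" is taken as a
+# host path). The chmod is then expected to fail because the referenced
+# profile is the restricted one generated above.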
+
+# 6. test running with ctr docker/default
+# test that we cannot run with a syscall blocked by the default seccomp profile
+@test "ctr seccomp profiles docker/default" {
+	# this test requires seccomp, so skip this test if seccomp is not enabled.
+	enabled=$(is_seccomp_enabled)
+	if [[ "$enabled" -eq 0 ]]; then
+		skip "skip this test since seccomp is not enabled."
+	fi
+
+	sed -e 's/"chmod",//' "$SECCOMP_PROFILE" > "$TESTDIR"/seccomp_profile1.json
+	sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
+	sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
+
+	start_crio "$TESTDIR"/seccomp_profile1.json
+
+	sed -e 's/%VALUE%/docker\/default/g' "$TESTDATA"/container_config_seccomp.json > "$TESTDIR"/seccomp2.json
+	run crictl runs "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	pod_id="$output"
+	run crictl create "$pod_id" "$TESTDIR"/seccomp2.json "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	ctr_id="$output"
+	run crictl start "$ctr_id"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run crictl exec --sync "$ctr_id" chmod 777 .
+	echo "$output"
+	[ "$status" -eq 0 ]
+	[[ "$output" =~ "Exit code: 1" ]]
+	[[ "$output" =~ "Operation not permitted" ]]
+
+	cleanup_ctrs
+	cleanup_pods
+	stop_crio
+}
diff --git a/test/default_mounts.bats b/test/default_mounts.bats
new file mode 100644
index 00000000..711aa40c
--- /dev/null
+++ b/test/default_mounts.bats
@@ -0,0 +1,69 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE="redis:alpine"
+
+function teardown() {
+	cleanup_test
+}
+
+@test "bind secrets mounts to container" {
+	start_crio
+	run crictl runs "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	pod_id="$output"
+	run crictl pull "$IMAGE"
+	[ "$status" -eq 0 ]
+	run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	ctr_id="$output"
+	run crictl start "$ctr_id"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run crictl exec --sync "$ctr_id" cat /proc/mounts
+	echo "$output"
+	[ "$status" -eq 0 ]
+	mount_info="$output"
+	run grep /container/path1 <<< "$mount_info"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	cleanup_ctrs
+	cleanup_pods
+	stop_crio
+}
+
+@test "default mounts correctly sorted with other mounts" {
+	start_crio
+	run crictl runs "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	pod_id="$output"
+	run crictl pull "$IMAGE"
+	[ "$status" -eq 0 ]
+	host_path="$TESTDIR"/clash
+	mkdir "$host_path"
+	echo "clashing..." > "$host_path"/clashing.txt
+	sed -e "s,%HPATH%,$host_path,g" "$TESTDATA"/container_redis_default_mounts.json > "$TESTDIR"/defmounts_pre.json
+	sed -e 's,%CPATH%,\/container\/path1\/clash,g' "$TESTDIR"/defmounts_pre.json > "$TESTDIR"/defmounts.json
+	run crictl create "$pod_id" "$TESTDIR"/defmounts.json "$TESTDATA"/sandbox_config.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	ctr_id="$output"
+	run crictl start "$ctr_id"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run crictl exec --sync "$ctr_id" ls -la /container/path1/clash
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run crictl exec --sync "$ctr_id" cat /container/path1/clash/clashing.txt
+	echo "$output"
+	[ "$status" -eq 0 ]
+	[[ "$output" =~ "clashing..." ]]
+	run crictl exec --sync "$ctr_id" ls -la /container/path1
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run crictl exec --sync "$ctr_id" cat /container/path1/test.txt
+	echo "$output"
+	[ "$status" -eq 0 ]
+	[[ "$output" =~ "Testing secrets mounts!" ]]
+	cleanup_ctrs
+	cleanup_pods
+	stop_crio
+}
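+
+# Note: both tests depend on crio being started with the default-mounts flag
+# that test/helpers.bash wires up, conceptually:
+#
+#   crio --default-mounts="$TESTDIR/secrets:/container/path1" ...
+#
+# which bind-mounts the host secrets directory into every container, so
+# /container/path1/test.txt is expected to contain "Testing secrets mounts!".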
diff --git a/test/helpers.bash b/test/helpers.bash
index ac30f22d..a0c715e1 100644
--- a/test/helpers.bash
+++ b/test/helpers.bash
@@ -10,16 +10,14 @@ TESTDATA="${INTEGRATION_ROOT}/testdata"
 CRIO_ROOT=${CRIO_ROOT:-$(cd "$INTEGRATION_ROOT/../.."; pwd -P)}
 
 # Path of the crio binary.
-CRIO_BINARY=${CRIO_BINARY:-${CRIO_ROOT}/cri-o/crio}
+CRIO_BINARY=${CRIO_BINARY:-${CRIO_ROOT}/cri-o/bin/crio}
 # Path of the crictl binary.
 CRICTL_PATH=$(command -v crictl || true)
 CRICTL_BINARY=${CRICTL_PATH:-/usr/bin/crictl}
-# Path to kpod binary.
-KPOD_BINARY=${KPOD_BINARY:-${CRIO_ROOT}/cri-o/kpod}
 # Path of the conmon binary.
-CONMON_BINARY=${CONMON_BINARY:-${CRIO_ROOT}/cri-o/conmon/conmon}
+CONMON_BINARY=${CONMON_BINARY:-${CRIO_ROOT}/cri-o/bin/conmon}
 # Path of the pause binary.
-PAUSE_BINARY=${PAUSE_BINARY:-${CRIO_ROOT}/cri-o/pause/pause}
+PAUSE_BINARY=${PAUSE_BINARY:-${CRIO_ROOT}/cri-o/bin/pause}
 # Path of the default seccomp profile.
 SECCOMP_PROFILE=${SECCOMP_PROFILE:-${CRIO_ROOT}/cri-o/seccomp.json}
 # Name of the default apparmor profile.
@@ -58,6 +56,8 @@ IMAGE_VOLUMES=${IMAGE_VOLUMES:-mkdir}
 PIDS_LIMIT=${PIDS_LIMIT:-1024}
 # Log size max limit
 LOG_SIZE_MAX_LIMIT=${LOG_SIZE_MAX_LIMIT:--1}
+# Enable sharing of the container pid namespace
+ENABLE_SHARED_PID_NAMESPACE=${ENABLE_SHARED_PID_NAMESPACE:-false}
 
 TESTDIR=$(mktemp -d)
 
@@ -69,12 +69,21 @@ HOOKSDIR=$TESTDIR/hooks
 mkdir ${HOOKSDIR}
 HOOKS_OPTS="--hooks-dir-path=$HOOKSDIR"
 
+# Set up default secrets mounts
+MOUNT_PATH="$TESTDIR/secrets"
+mkdir ${MOUNT_PATH}
+MOUNT_FILE="${MOUNT_PATH}/test.txt"
+touch ${MOUNT_FILE}
+echo "Testing secrets mounts!" > ${MOUNT_FILE}
+
+DEFAULT_MOUNTS_OPTS="--default-mounts=${MOUNT_PATH}:/container/path1"
+
 # We may need to set some default storage options.
 case "$(stat -f -c %T ${TESTDIR})" in
 	aufs)
 		# None of device mapper, overlay, or aufs can be used dependably over aufs, and of course btrfs and zfs can't,
 		# and we have to explicitly specify the "vfs" driver in order to use it, so do that now.
-		STORAGE_OPTS=${STORAGE_OPTS:---storage-driver vfs}
+		STORAGE_OPTIONS=${STORAGE_OPTIONS:---storage-driver vfs}
 		;;
 esac
 
@@ -90,13 +99,11 @@ CRIO_CNI_PLUGIN=${CRIO_CNI_PLUGIN:-/opt/cni/bin/}
 POD_CIDR="10.88.0.0/16"
 POD_CIDR_MASK="10.88.*.*"
 
-KPOD_OPTIONS="--root $TESTDIR/crio $STORAGE_OPTS --runroot $TESTDIR/crio-run --runtime ${RUNTIME_BINARY}"
-
 cp "$CONMON_BINARY" "$TESTDIR/conmon"
 
 PATH=$PATH:$TESTDIR
 
-# Make sure we have a copy of the redis:latest image.
+# Make sure we have a copy of the redis:alpine image.
 if ! [ -d "$ARTIFACTS_PATH"/redis-image ]; then
 	mkdir -p "$ARTIFACTS_PATH"/redis-image
 	if ! "$COPYIMG_BINARY" --import-from=docker://redis:alpine --export-to=dir:"$ARTIFACTS_PATH"/redis-image --signature-policy="$INTEGRATION_ROOT"/policy.json ; then
@@ -106,19 +113,6 @@ if ! [ -d "$ARTIFACTS_PATH"/redis-image ]; then
 	fi
 fi
 
-# TODO: remove the code below for redis digested image id when
-# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete
-# as the digested reference will be auto-stored when pulling the tag
-# above
-if ! [ -d "$ARTIFACTS_PATH"/redis-image-digest ]; then
-	mkdir -p "$ARTIFACTS_PATH"/redis-image-digest
"$COPYIMG_BINARY" --import-from=docker://redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --export-to=dir:"$ARTIFACTS_PATH"/redis-image-digest --signature-policy="$INTEGRATION_ROOT"/policy.json ; then - echo "Error pulling docker://redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b" - rm -fr "$ARTIFACTS_PATH"/redis-image-digest - exit 1 - fi -fi - # Make sure we have a copy of the runcom/stderr-test image. if ! [ -d "$ARTIFACTS_PATH"/stderr-test ]; then mkdir -p "$ARTIFACTS_PATH"/stderr-test @@ -164,13 +158,6 @@ function crio() { "$CRIO_BINARY" --listen "$CRIO_SOCKET" "$@" } -# DEPRECATED -OCIC_BINARY=${OCIC_BINARY:-${CRIO_ROOT}/cri-o/crioctl} -# Run crioctl using the binary specified by $OCIC_BINARY. -function crioctl() { - "$OCIC_BINARY" --connect "$CRIO_SOCKET" "$@" -} - # Run crictl using the binary specified by $CRICTL_BINARY. function crictl() { "$CRICTL_BINARY" -r "$CRIO_SOCKET" -i "$CRIO_SOCKET" "$@" @@ -202,9 +189,9 @@ function retry() { false } -# Waits until the given crio becomes reachable. +# Waits until crio becomes reachable. function wait_until_reachable() { - retry 15 1 crictl status + retry 15 1 crictl info } # Start crio. @@ -223,19 +210,14 @@ function start_crio() { # Don't forget: bin2img, copyimg, and crio have their own default drivers, so if you override any, you probably need to override them all if ! [ "$3" = "--no-pause-image" ] ; then - "$BIN2IMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --source-binary "$PAUSE_BINARY" + "$BIN2IMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --source-binary "$PAUSE_BINARY" fi - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --add-name=docker.io/library/redis:alpine --signature-policy="$INTEGRATION_ROOT"/policy.json -# TODO: remove the code below for redis:alpine digested image id when -# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete -# as the digested reference will be auto-stored when pulling the tag -# above - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --import-from=dir:"$ARTIFACTS_PATH"/redis-image-digest --add-name=docker.io/library/redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/oom --import-from=dir:"$ARTIFACTS_PATH"/oom-image --add-name=docker.io/library/mrunalp/oom --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/image-volume-test --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --add-name=docker.io/library/mrunalp/image-volume-test --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image --add-name=docker.io/library/busybox:latest --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test 
-	"$CRIO_BINARY" ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --cni-plugin-dir "$CRIO_CNI_PLUGIN" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --log-size-max "$LOG_SIZE_MAX_LIMIT" --config /dev/null config >$CRIO_CONFIG
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/mrunalp/oom:latest --import-from=dir:"$ARTIFACTS_PATH"/oom-image --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/mrunalp/image-volume-test:latest --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$CRIO_BINARY" ${DEFAULT_MOUNTS_OPTS} ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTIONS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --cni-plugin-dir "$CRIO_CNI_PLUGIN" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --enable-shared-pid-namespace=${ENABLE_SHARED_PID_NAMESPACE} --log-size-max "$LOG_SIZE_MAX_LIMIT" --config /dev/null config >$CRIO_CONFIG
 
 	# Prepare the CNI configuration files, we're running with non host networking by default
 	if [[ -n "$4" ]]; then
@@ -252,49 +234,33 @@ function start_crio() {
 	if [ "$status" -ne 0 ] ; then
 		crictl pull redis:alpine
 	fi
-	REDIS_IMAGEID=$(crictl inspecti redis:alpine | head -1 | sed -e "s/ID: //g")
+	REDIS_IMAGEID=$(crictl inspecti redis:alpine | grep ^ID: | head -n 1 | sed -e "s/ID: //g")
+	REDIS_IMAGEREF=$(crictl inspecti redis:alpine | grep ^Digest: | head -n 1 | sed -e "s/Digest: //g")
 	run crictl inspecti mrunalp/oom
 	if [ "$status" -ne 0 ] ; then
 		crictl pull mrunalp/oom
 	fi
-	#
-	#
-	#
-	# TODO: remove the code below for redis digested image id when
-	# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete
-	# as the digested reference will be auto-stored when pulling the tag
-	# above
-	#
-	#
-	#
-	REDIS_IMAGEID_DIGESTED="redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b"
-	run crictl inspecti $REDIS_IMAGEID_DIGESTED
-	if [ "$status" -ne 0 ]; then
-		crictl pull $REDIS_IMAGEID_DIGESTED
-	fi
-	#
-	#
-	#
+	OOM_IMAGEID=$(crictl inspecti mrunalp/oom | grep ^ID: | head -n 1 | sed -e "s/ID: //g")
 	run crictl inspecti runcom/stderr-test
 	if [ "$status" -ne 0 ] ; then
 		crictl pull runcom/stderr-test:latest
 	fi
-	STDERR_IMAGEID=$(crictl inspecti runcom/stderr-test | head -1 | sed -e "s/ID: //g")
+	STDERR_IMAGEID=$(crictl inspecti runcom/stderr-test | grep ^ID: | head -n 1 | sed -e "s/ID: //g")
 	run crictl inspecti busybox
 	if [ "$status" -ne 0 ] ; then
 		crictl pull busybox:latest
 	fi
-	BUSYBOX_IMAGEID=$(crictl inspecti busybox | head -1 | sed -e "s/ID: //g")
+	BUSYBOX_IMAGEID=$(crictl inspecti busybox | grep ^ID: | head -n 1 | sed -e "s/ID: //g")
 	run crictl inspecti mrunalp/image-volume-test
 	if [ "$status" -ne 0 ] ; then
 		crictl pull mrunalp/image-volume-test:latest
 	fi
-	VOLUME_IMAGEID=$(crictl inspecti mrunalp/image-volume-test | head -1 | sed -e "s/ID: //g")
+	VOLUME_IMAGEID=$(crictl inspecti mrunalp/image-volume-test | grep ^ID: | head -n 1 | sed -e "s/ID: //g")
 }
 
 function cleanup_ctrs() {
-	run crictl ps --quiet
-	if [ "$status" -eq 0 ]; then
+	output=$(crictl ps --quiet)
+	if [ $? -eq 0 ]; then
 		if [ "$output" != "" ]; then
 			printf '%s\n' "$output" | while IFS= read -r line
 			do
@@ -307,8 +273,8 @@ function cleanup_ctrs() {
 }
 
 function cleanup_images() {
-	run crictl images --quiet
-	if [ "$status" -eq 0 ]; then
+	output=$(crictl images --quiet)
+	if [ $? -eq 0 ]; then
 		if [ "$output" != "" ]; then
 			printf '%s\n' "$output" | while IFS= read -r line
 			do
@@ -319,8 +285,8 @@ function cleanup_images() {
 }
 
 function cleanup_pods() {
-	run crictl sandboxes --quiet
-	if [ "$status" -eq 0 ]; then
+	output=$(crictl sandboxes --quiet)
+	if [ $? -eq 0 ]; then
 		if [ "$output" != "" ]; then
 			printf '%s\n' "$output" | while IFS= read -r line
 			do
@@ -439,7 +405,7 @@ EOF
 }
 
 function check_pod_cidr() {
-	run crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1
+	run crictl exec --sync $1 ip addr show dev eth0 scope global 2>&1
 	echo "$output"
 	[ "$status" -eq 0 ]
 	[[ "$output" =~ $POD_CIDR_MASK ]]
@@ -463,7 +429,7 @@ function get_host_ip() {
 }
 
 function ping_pod() {
-	inet=`crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1 | grep inet`
+	inet=`crictl exec --sync $1 ip addr show dev eth0 scope global 2>&1 | grep inet`
 
 	IFS=" "
 	ip=`parse_pod_ip $inet`
@@ -474,12 +440,12 @@ function ping_pod() {
 }
 
 function ping_pod_from_pod() {
-	inet=`crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1 | grep inet`
+	inet=`crictl exec --sync $1 ip addr show dev eth0 scope global 2>&1 | grep inet`
 
 	IFS=" "
 	ip=`parse_pod_ip $inet`
 
-	run crioctl ctr execsync --id $2 ping -W 1 -c 2 $ip
+	run crictl exec --sync $2 ping -W 1 -c 2 $ip
 	echo "$output"
 	[ "$status" -eq 0 ]
 }
diff --git a/test/hooks.bats b/test/hooks.bats
index 0c1a51ea..2e1b7ae4 100644
--- a/test/hooks.bats
+++ b/test/hooks.bats
@@ -10,23 +10,23 @@ cp hooks/checkhook.sh ${HOOKSDIR}
 sed "s|HOOKSDIR|${HOOKSDIR}|" hooks/checkhook.json > ${HOOKSDIR}/checkhook.json
 
 @test "pod test hooks" {
-	run rm -f /run/hookscheck
+	rm -f /run/hookscheck
 	start_crio
-	run crioctl pod run --config "$TESTDATA"/sandbox_config.json
+	run crictl runs "$TESTDATA"/sandbox_config.json
 	echo "$output"
 	[ "$status" -eq 0 ]
 	pod_id="$output"
-	run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+	run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json
 	echo "$output"
 	[ "$status" -eq 0 ]
 	ctr_id="$output"
-	run crioctl ctr start --id "$ctr_id"
+	run crictl start "$ctr_id"
 	echo "$output"
 	[ "$status" -eq 0 ]
-	run crioctl pod stop --id "$pod_id"
+	run crictl stops "$pod_id"
 	echo "$output"
 	[ "$status" -eq 0 ]
-	run crioctl pod remove --id "$pod_id"
+	run crictl rms "$pod_id"
 	echo "$output"
 	[ "$status" -eq 0 ]
 	run cat /run/hookscheck
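+
+# Note: checkhook.sh (registered through checkhook.json above) runs as an OCI
+# hook for the pod and is expected to create /run/hookscheck, so the final
+# "run cat /run/hookscheck" only passes when the hook actually fired. An
+# illustrative hook config of this shape (field names assumed):
+#
+#   { "hook": "$HOOKSDIR/checkhook.sh", "stages": ["prestart"] }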
echo "$output" [ "$status" -eq 0 ] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] run cat /run/hookscheck diff --git a/test/image.bats b/test/image.bats index e62674a7..52336641 100644 --- a/test/image.bats +++ b/test/image.bats @@ -12,12 +12,16 @@ function teardown() { @test "run container in pod with image ID" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" sed -e "s/%VALUE%/$REDIS_IMAGEID/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageid.json - run crioctl ctr create --config "$TESTDIR"/ctr_by_imageid.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/ctr_by_imageid.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -25,70 +29,115 @@ function teardown() { stop_crio } -@test "container status return image:tag if created by image ID" { +@test "container status when created by image ID" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" sed -e "s/%VALUE%/$REDIS_IMAGEID/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageid.json - run crioctl ctr create --config "$TESTDIR"/ctr_by_imageid.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/ctr_by_imageid.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" --output yaml echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Image: redis:alpine" ]] + [[ "$output" =~ "image: docker.io/library/redis:alpine" ]] + [[ "$output" =~ "imageRef: $REDIS_IMAGEREF" ]] cleanup_ctrs cleanup_pods stop_crio } -@test "container status return image@digest if created by image ID and digest available" { - skip "depends on https://github.com/kubernetes-incubator/cri-o/issues/531" - +@test "container status when created by image tagged reference" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - sed -e "s/%VALUE%/$REDIS_IMAGEID_DIGESTED/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageid.json + sed -e "s/%VALUE%/redis:alpine/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imagetag.json - run crioctl ctr create --config "$TESTDIR"/ctr_by_imageid.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/ctr_by_imagetag.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" --output yaml echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "ImageRef: redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b" ]] + [[ "$output" =~ "image: docker.io/library/redis:alpine" ]] + [[ "$output" =~ "imageRef: $REDIS_IMAGEREF" ]] cleanup_ctrs cleanup_pods stop_crio } -@test "image pull" { +@test "container status when created by image canonical reference" { + start_crio + + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + + 
sed -e "s|%VALUE%|$REDIS_IMAGEREF|g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageref.json + + run crictl create "$pod_id" "$TESTDIR"/ctr_by_imageref.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + run crictl inspect "$ctr_id" --output yaml + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "image: docker.io/library/redis:alpine" ]] + [[ "$output" =~ "imageRef: $REDIS_IMAGEREF" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "image pull and list" { start_crio "" "" --no-pause-image - run crioctl image pull "$IMAGE" + run crictl pull "$IMAGE" echo "$output" [ "$status" -eq 0 ] + + run crictl images --quiet "$IMAGE" + [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] + imageid="$output" + + run crictl images @"$imageid" + [ "$status" -eq 0 ] + [[ "$output" =~ "$IMAGE" ]] + + run crictl images --quiet "$imageid" + [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] cleanup_images stop_crio } @test "image pull with signature" { start_crio "" "" --no-pause-image - run crioctl image pull "$SIGNED_IMAGE" + run crictl pull "$SIGNED_IMAGE" echo "$output" [ "$status" -eq 0 ] cleanup_images @@ -97,35 +146,57 @@ function teardown() { @test "image pull without signature" { start_crio "" "" --no-pause-image - run crioctl image pull "$UNSIGNED_IMAGE" + run crictl image pull "$UNSIGNED_IMAGE" echo "$output" [ "$status" -ne 0 ] cleanup_images stop_crio } -@test "image pull and list by digest" { +@test "image pull and list by tag and ID" { start_crio "" "" --no-pause-image - run crioctl image pull nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + run crictl pull "$IMAGE:go" echo "$output" [ "$status" -eq 0 ] - run crioctl image list --quiet nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + run crictl images --quiet "$IMAGE:go" + [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] + imageid="$output" + + run crictl images --quiet @"$imageid" [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] - run crioctl image list --quiet nginx@33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + run crictl images --quiet "$imageid" [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] - run crioctl image list --quiet @33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + cleanup_images + stop_crio +} + +@test "image pull and list by digest and ID" { + start_crio "" "" --no-pause-image + run crictl pull nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + echo "$output" + [ "$status" -eq 0 ] + + run crictl images --quiet nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] + imageid="$output" + + run crictl images --quiet @"$imageid" [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] - run crioctl image list --quiet 33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + run crictl images --quiet "$imageid" [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] @@ -136,18 +207,18 @@ function teardown() { @test "image list with filter" { start_crio "" "" --no-pause-image - run crioctl image pull "$IMAGE" + run crictl pull "$IMAGE" echo "$output" [ "$status" -eq 0 ] - run crioctl image list --quiet "$IMAGE" + run crictl images --quiet "$IMAGE" echo "$output" [ "$status" -eq 0 ] printf '%s\n' "$output" | while IFS= read -r id; 
@@ -136,18 +207,18 @@ function teardown() {
 
 @test "image list with filter" {
 	start_crio "" "" --no-pause-image
-	run crioctl image pull "$IMAGE"
+	run crictl pull "$IMAGE"
 	echo "$output"
 	[ "$status" -eq 0 ]
-	run crioctl image list --quiet "$IMAGE"
+	run crictl images --quiet "$IMAGE"
 	echo "$output"
 	[ "$status" -eq 0 ]
 	printf '%s\n' "$output" | while IFS= read -r id; do
-		run crioctl image remove --id "$id"
+		run crictl rmi "$id"
 		echo "$output"
 		[ "$status" -eq 0 ]
 	done
-	run crioctl image list --quiet
+	run crictl images --quiet
 	echo "$output"
 	[ "$status" -eq 0 ]
 	printf '%s\n' "$output" | while IFS= read -r id; do
@@ -160,19 +231,19 @@ function teardown() {
 
 @test "image list/remove" {
 	start_crio "" "" --no-pause-image
-	run crioctl image pull "$IMAGE"
+	run crictl pull "$IMAGE"
 	echo "$output"
 	[ "$status" -eq 0 ]
-	run crioctl image list --quiet
+	run crictl images --quiet
 	echo "$output"
 	[ "$status" -eq 0 ]
 	[ "$output" != "" ]
 	printf '%s\n' "$output" | while IFS= read -r id; do
-		run crioctl image remove --id "$id"
+		run crictl rmi "$id"
 		echo "$output"
 		[ "$status" -eq 0 ]
 	done
-	run crioctl image list --quiet
+	run crictl images --quiet
 	echo "$output"
 	[ "$status" -eq 0 ]
 	[ "$output" = "" ]
@@ -186,23 +257,23 @@ function teardown() {
 
 @test "image status/remove" {
 	start_crio "" "" --no-pause-image
-	run crioctl image pull "$IMAGE"
+	run crictl pull "$IMAGE"
 	echo "$output"
 	[ "$status" -eq 0 ]
-	run crioctl image list --quiet
+	run crictl images --quiet
 	echo "$output"
 	[ "$status" -eq 0 ]
 	[ "$output" != "" ]
 	printf '%s\n' "$output" | while IFS= read -r id; do
-		run crioctl image status --id "$id"
+		run crictl images -v "$id"
 		echo "$output"
 		[ "$status" -eq 0 ]
 		[ "$output" != "" ]
-		run crioctl image remove --id "$id"
+		run crictl rmi "$id"
 		echo "$output"
 		[ "$status" -eq 0 ]
 	done
-	run crioctl image list --quiet
+	run crictl images --quiet
 	echo "$output"
 	[ "$status" -eq 0 ]
 	[ "$output" = "" ]
diff --git a/test/image_remove.bats b/test/image_remove.bats
new file mode 100644
index 00000000..54b06c05
--- /dev/null
+++ b/test/image_remove.bats
@@ -0,0 +1,75 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE=docker.io/kubernetes/pause
+
+function teardown() {
+	cleanup_test
+}
+
+@test "image remove with multiple names, by name" {
+	start_crio "" "" --no-pause-image
+	# Pull the image, giving it one name.
+	run crictl pull "$IMAGE"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	# Add a second name to the image.
+	run "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name="$IMAGE":latest --add-name="$IMAGE":othertag --signature-policy="$INTEGRATION_ROOT"/policy.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	# Get the list of image names and IDs.
+	run crictl images -v
+	echo "$output"
+	[ "$status" -eq 0 ]
+	[ "$output" != "" ]
+	# Cycle through each name, removing it by name. The image that we assigned a second
+	# name to should still be around when we get to removing its second name.
+	grep ^RepoTags: <<< "$output" | while read -r header tag ignored ; do
+		run crictl rmi "$tag"
+		echo "$output"
+		[ "$status" -eq 0 ]
+	done
+	# List all images and their names. There should be none now.
+	run crictl images --quiet
+	echo "$output"
+	[ "$status" -eq 0 ]
+	[ "$output" = "" ]
+	printf '%s\n' "$output" | while IFS= read -r id; do
+		echo "$id"
+	done
+	# All done.
+	cleanup_images
+	stop_crio
+}
+
+@test "image remove with multiple names, by ID" {
+	start_crio "" "" --no-pause-image
+	# Pull the image, giving it one name.
+	run crictl pull "$IMAGE"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	# Add a second name to the image.
+	run "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name="$IMAGE":latest --add-name="$IMAGE":othertag --signature-policy="$INTEGRATION_ROOT"/policy.json
+	echo "$output"
+	[ "$status" -eq 0 ]
+	# Get the list of the image's names and its ID.
+ run crictl images -v "$IMAGE":latest + echo "$output" + [ "$status" -eq 0 ] + [ "$output" != "" ] + # Try to remove the image using its ID. That should succeed. + grep ^ID: <<< "$output" | while read -r header id ; do + run crictl rmi "$id" + echo "$output" + [ "$status" -eq 0 ] + done + # The image should be gone now. + run crictl images -v "$IMAGE" + echo "$output" + [ "$status" -eq 0 ] + [ "$output" = "" ] + # All done. + cleanup_images + stop_crio +} diff --git a/test/image_volume.bats b/test/image_volume.bats index ff05e9cd..f5b39401 100644 --- a/test/image_volume.bats +++ b/test/image_volume.bats @@ -8,28 +8,28 @@ function teardown() { @test "image volume ignore" { IMAGE_VOLUMES=ignore start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" image_volume_config=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "mrunalp/image-volume-test"; obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)') echo "$image_volume_config" > "$TESTDIR"/container_image_volume.json - run crioctl ctr create --config "$TESTDIR"/container_image_volume.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_image_volume.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" ls /imagevolume + run crictl exec --sync "$ctr_id" ls /imagevolume echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "Exit code: 1" ]] [[ "$output" =~ "ls: /imagevolume: No such file or directory" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs @@ -39,27 +39,27 @@ function teardown() { @test "image volume bind" { IMAGE_VOLUMES=bind start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" image_volume_config=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["image"]["image"] = "mrunalp/image-volume-test"; obj["command"] = ["/bin/sleep", "600"]; json.dump(obj, sys.stdout)') echo "$image_volume_config" > "$TESTDIR"/container_image_volume.json - run crioctl ctr create --config "$TESTDIR"/container_image_volume.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDIR"/container_image_volume.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" touch /imagevolume/test_file + run crictl exec --sync "$ctr_id" touch /imagevolume/test_file echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "Exit code: 0" ]] - run crioctl pod stop --id "$pod_id" + [ "$output" = "" ] + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] cleanup_ctrs diff --git a/test/inspect.bats b/test/inspect.bats index c63a688e..bb7977e5 100644 --- a/test/inspect.bats +++ b/test/inspect.bats @@ -12,22 +12,17 @@ function teardown() { echo "$out" [[ "$out" =~ 
"\"cgroup_driver\":\"$CGROUP_MANAGER\"" ]] [[ "$out" =~ "\"storage_root\":\"$TESTDIR/crio\"" ]] - run crioctl info - echo "$output" - [ "$status" -eq 0 ] - [[ "$output" =~ "\"cgroup_driver\": \"$CGROUP_MANAGER\"" ]] - [[ "$output" =~ "\"storage_root\": \"$TESTDIR/crio\"" ]] stop_crio } @test "ctr inspect" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" @@ -35,22 +30,26 @@ function teardown() { out=`echo -e "GET /containers/$ctr_id HTTP/1.1\r\nHost: crio\r\n" | socat - UNIX-CONNECT:$CRIO_SOCKET` echo "$out" [[ "$out" =~ "\"sandbox\":\"$pod_id\"" ]] - [[ "$out" =~ "\"image\":\"redis:alpine\"" ]] + [[ "$out" =~ "\"image\":\"docker.io/library/redis:alpine\"" ]] + [[ "$out" =~ "\"image_ref\":\"$REDIS_IMAGEREF\"" ]] - run crioctl ctr inspect --id $ctr_id + run crictl inspect --output json "$ctr_id" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "\"sandbox\": \"$pod_id\"" ]] - [[ "$output" =~ "\"image\": \"redis:alpine\"" ]] + [[ "$output" =~ "\"id\": \"$ctr_id\"" ]] + [[ "$output" =~ "\"image\": \"docker.io/library/redis:alpine\"" ]] + [[ "$output" =~ "\"imageRef\": \"$REDIS_IMAGEREF\"" ]] - inet=`crioctl ctr execsync --id $ctr_id ip addr show dev eth0 scope global 2>&1 | grep inet` + run crictl inspects --output json "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + + inet=`crictl exec --sync "$ctr_id" ip addr show dev eth0 scope global 2>&1 | grep inet` IFS=" " ip=`parse_pod_ip $inet` [[ "$out" =~ "\"ip_address\":\"$ip\"" ]] - [[ "$out" =~ "\"name\":\"k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1\"" ]] - [[ "$output" =~ "\"ip_address\": \"$ip\"" ]] - [[ "$output" =~ "\"name\": \"k8s_container1_podsandbox1_redhat.test.crio_redhat-test-crio_1\"" ]] + [[ "$output" =~ "\"ip\": \"$ip\"" ]] # TODO: add some other check based on the json below: diff --git a/test/kpod_diff.bats b/test/kpod_diff.bats deleted file mode 100644 index 53a94d01..00000000 --- a/test/kpod_diff.bats +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" - -function teardown() { - cleanup_test -} - -@test "test diff of image and parent" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS diff $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] -} - -@test "test diff on non-existent layer" { - run ${KPOD_BINARY} $KPOD_OPTIONS diff "abc123" - echo "$output" - [ "$status" -ne 0 ] -} - -@test "test diff with json output" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - # run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} diff --format json $IMAGE | python -m json.tool" - run ${KPOD_BINARY} $KPOD_OPTIONS diff --format json $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] -} diff --git a/test/kpod_export.bats b/test/kpod_export.bats deleted file mode 100644 index 9454db39..00000000 --- a/test/kpod_export.bats +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="redis:alpine" - -function teardown() { - cleanup_test -} - -@test "kpod 
export output flag" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} export -o container.tar "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - rm -f container.tar -} diff --git a/test/kpod_history.bats b/test/kpod_history.bats deleted file mode 100644 index aa89cfe6..00000000 --- a/test/kpod_history.bats +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" - -function teardown() { - cleanup_test -} - -@test "kpod history default" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} history $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod history with Go template format" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} history --format "{{.ID}} {{.Created}}" $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod history human flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} history --human=false $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod history quiet flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} history -q $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod history no-trunc flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} history --no-trunc $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod history json flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} history --format json $IMAGE | python -m json.tool" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] -} diff --git a/test/kpod_images.bats b/test/kpod_images.bats deleted file mode 100644 index 92e63aa3..00000000 --- a/test/kpod_images.bats +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="debian:6.0.10" - -function teardown() { - cleanup_test -} - -@test "kpod images" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} images - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod images test valid json" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} images --format json - echo "$output" | python -m json.tool - [ "$status" 
-eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod images check name json output" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} images --format json - echo "$output" - name=$(echo $output | python -c 'import sys; import json; print(json.loads(sys.stdin.read())[0])["names"][0]') - [ "$name" = "docker.io/library/${IMAGE}" ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] -} diff --git a/test/kpod_inspect.bats b/test/kpod_inspect.bats deleted file mode 100644 index ca4b7c8e..00000000 --- a/test/kpod_inspect.bats +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="redis:alpine" - -function teardown() { - cleanup_test -} - -@test "kpod inspect image" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] - run bash -c "${KPOD_BINARY} $KPOD_OPTIONS inspect ${IMAGE} | python -m json.tool" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] -} - - -@test "kpod inspect non-existent container" { - run ${KPOD_BINARY} $KPOD_OPTIONS inspect 14rcole/non-existent - echo "$output" - [ "$status" -ne 0 ] -} - -@test "kpod inspect with format" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS inspect --format {{.ID}} ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] - inspectOutput="$output" - run ${KPOD_BINARY} $KPOD_OPTIONS images --no-trunc --quiet ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] - [ "$output" = "$inspectOutput" ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod inspect specified type" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] - run bash -c "${KPOD_BINARY} $KPOD_OPTIONS inspect --type image ${IMAGE} | python -m json.tool" - echo "$output" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi ${IMAGE} - echo "$output" - [ "$status" -eq 0 ] -} diff --git a/test/kpod_kill.bats b/test/kpod_kill.bats deleted file mode 100644 index cf3e7859..00000000 --- a/test/kpod_kill.bats +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -ROOT="$TESTDIR/crio" -RUNROOT="$TESTDIR/crio-run" -KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT ${STORAGE_OPTS} --runtime $RUNTIME_BINARY" -function teardown() { - cleanup_test -} - -function start_sleep_container () { - pod_id=$(crioctl pod run --config "$TESTDATA"/sandbox_config.json) - ctr_id=$(crioctl ctr create --config "$TESTDATA"/container_config_sleep.json --pod "$pod_id") - crioctl ctr start --id "$ctr_id" -} - -@test "kill a bogus container" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} kill foobar - echo "$output" - [ "$status" -ne 0 ] -} - -@test "kill a running container by id" { - start_crio - ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest - ctr_id=$( start_sleep_container ) - crioctl ctr status --id "$ctr_id" - ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a - ${KPOD_BINARY} ${KPOD_OPTIONS} logs "$ctr_id" - crioctl ctr status --id "$ctr_id" - run ${KPOD_BINARY} ${KPOD_OPTIONS} kill "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kill a running container by id with TERM" { - start_crio - ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest - ctr_id=$( 
start_sleep_container ) - crioctl ctr status --id "$ctr_id" - ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a - ${KPOD_BINARY} ${KPOD_OPTIONS} logs "$ctr_id" - crioctl ctr status --id "$ctr_id" - run ${KPOD_BINARY} ${KPOD_OPTIONS} kill -s TERM "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kill a running container by name" { - start_crio - ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest - ctr_id=$( start_sleep_container ) - crioctl ctr status --id "$ctr_id" - ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a - ${KPOD_BINARY} ${KPOD_OPTIONS} logs "$ctr_id" - crioctl ctr status --id "$ctr_id" - ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a - run ${KPOD_BINARY} ${KPOD_OPTIONS} kill "k8s_container999_podsandbox1_redhat.test.crio_redhat-test-crio_1" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kill a running container by id with a bogus signal" { - start_crio - ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest - ctr_id=$( start_sleep_container ) - crioctl ctr status --id "$ctr_id" - ${KPOD_BINARY} ${KPOD_OPTIONS} logs "$ctr_id" - crioctl ctr status --id "$ctr_id" - run ${KPOD_BINARY} ${KPOD_OPTIONS} kill -s foobar "$ctr_id" - echo "$output" - [ "$status" -ne 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/kpod_load.bats b/test/kpod_load.bats deleted file mode 100644 index bb8dd086..00000000 --- a/test/kpod_load.bats +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" - -function teardown() { - cleanup_test -} - -@test "kpod load input flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} load -i alpine.tar - echo "$output" - [ "$status" -eq 0 ] - rm -f alpine.tar - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod load oci-archive image" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar --format oci-archive $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} load -i alpine.tar - echo "$output" - [ "$status" -eq 0 ] - rm -f alpine.tar - run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE - [ "$status" -eq 0 ] -} - -@test "kpod load using quiet flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} load -q -i alpine.tar - echo "$output" - [ "$status" -eq 0 ] - rm -f alpine.tar - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE - [ "$status" -eq 0 ] -} - -@test "kpod load non-existent file" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} load -i alpine.tar - echo "$output" - [ "$status" -ne 0 ] -} diff --git a/test/kpod_logs.bats b/test/kpod_logs.bats deleted file mode 100644 index 1e301556..00000000 --- a/test/kpod_logs.bats +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" - -function teardown() { - cleanup_test -} - -@test "display logs for container" { - start_crio - 
run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS logs "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "tail three lines of logs for container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS logs --tail 3 $ctr_id - echo "$output" - lines=$(echo "$output" | wc -l) - [ "$status" -eq 0 ] - [[ $(wc -l < "$output" ) -le 3 ]] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "display logs for container since a given time" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS logs --since 2017-08-07T10:10:09.056611202-04:00 $ctr_id - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/kpod_mount.bats b/test/kpod_mount.bats deleted file mode 100644 index 237dd584..00000000 --- a/test/kpod_mount.bats +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="redis:alpine" - -function teardown() { - cleanup_test -} - -@test "mount" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} mount $ctr_id - echo "$output" - echo ${KPOD_BINARY} ${KPOD_OPTIONS} mount $ctr_id - [ "$status" -eq 0 ] - run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} mount --notruncate | grep $ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} unmount $ctr_id - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} mount $ctr_id - echo "$output" - [ "$status" -eq 0 ] - root="$output" - run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} mount --format=json | python -m json.tool | grep $ctr_id" - echo "$output" - [ "$status" -eq 0 ] - touch $root/foobar - run ${KPOD_BINARY} ${KPOD_OPTIONS} unmount $ctr_id - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/kpod_pause.bats b/test/kpod_pause.bats deleted file mode 100644 index 746d39db..00000000 --- a/test/kpod_pause.bats +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="redis:alpine" - -function teardown() { - cleanup_test -} - -@test "pause a bogus container" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pause foobar - echo "$output" - [ "$status" -eq 1 ] -} - -@test "unpause a bogus container" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} 
unpause foobar - echo "$output" - [ "$status" -eq 1 ] -} - -@test "pause a created container by id" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} unpause "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter id="$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_pods - stop_crio -} - -@test "pause a running container by id" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "$id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} unpause "$id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter id="$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_pods - stop_crio -} - -@test "pause a running container by name" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} unpause "k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter id="k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0" - echo "$output" - [ "$status" -eq 0 ] - cleanup_pods - stop_crio -} - -@test "remove a paused container by id" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "$id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rm "$id" - echo "$output" - [ "$status" -eq 1 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rm --force "$id" - echo "$output" - [ "$status" -eq 1 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} unpause "$id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} stop "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rm "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_pods - stop_crio -} - -@test "stop a paused container created by id" { - start_crio - run crioctl pod run 
--config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} stop "$ctr_id" - echo "$output" - [ "$status" -eq 1 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} unpause "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter id="$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_pods - stop_crio -} diff --git a/test/kpod_ps.bats b/test/kpod_ps.bats deleted file mode 100644 index 4b2628d3..00000000 --- a/test/kpod_ps.bats +++ /dev/null @@ -1,317 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="redis:alpine" - -@test "kpod ps with no containers" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod ps default" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kpod ps all flag" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps --all - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kpod ps size flag" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a -s - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --size - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kpod ps quiet flag" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a -q - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --quiet - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kpod ps latest flag" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull 
"$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps --latest - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -l - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kpod ps last flag" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps --last 2 - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -n 2 - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kpod ps no-trunc flag" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --no-trunc - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps namespace flag" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --ns - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps --all --namespace - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kpod ps namespace flag and format flag = json" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --ns --format json | python -m json.tool | grep namespace" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kpod ps without namespace flag and format flag = json" { - start_crio - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --format json | python -m json.tool | grep namespace" - echo "$output" - [ "$status" -eq 1 ] - cleanup_ctrs - cleanup_pods - stop_crio - [ "$status" -eq 0 ] -} - -@test "kpod ps format flag = go template" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl 
ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --format "table {{.ID}} {{.Image}} {{.Labels}}" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kpod ps filter flag - ancestor" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter ancestor=${IMAGE} - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kpod ps filter flag - id" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter id="$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "kpod ps filter flag - status" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl image pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} ps -a --filter status=running - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/kpod_pull.bats b/test/kpod_pull.bats deleted file mode 100644 index 3e58397d..00000000 --- a/test/kpod_pull.bats +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" - -function teardown() { - cleanup_test -} - -@test "kpod pull from docker with tag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull debian:6.0.10 - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi debian:6.0.10 - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod pull from docker without tag" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull debian - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi debian - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod pull from a non-docker registry with tag" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull registry.fedoraproject.org/fedora:rawhide - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi registry.fedoraproject.org/fedora:rawhide - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod pull from a non-docker registry without tag" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull registry.fedoraproject.org/fedora - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi registry.fedoraproject.org/fedora - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod pull using digest" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull alpine@sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi alpine:latest - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod pull from a non-existent image" { - run
${KPOD_BINARY} $KPOD_OPTIONS pull umohnani/get-started - echo "$output" - [ "$status" -ne 0 ] -} - -@test "kpod pull from docker with shortname" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull debian - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi docker.io/debian:latest - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod pull from docker with shortname and tag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull debian:6.0.10 - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi docker.io/debian:6.0.10 - echo "$output" - [ "$status" -eq 0 ] -} diff --git a/test/kpod_push.bats b/test/kpod_push.bats deleted file mode 100644 index d1507f12..00000000 --- a/test/kpod_push.bats +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" - -function teardown() { - cleanup_test -} - -@test "kpod push to containers/storage" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" containers-storage:[${TESTDIR}/crio]busybox:test - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE" busybox:test - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod push to directory" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run mkdir /tmp/busybox - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" dir:/tmp/busybox - echo "$output" - [ "$status" -eq 0 ] - rm -rf /tmp/busybox - run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod push to docker archive" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" docker-archive:/tmp/busybox-archive:1.26 - echo "$output" - [ "$status" -eq 0 ] - rm /tmp/busybox-archive - run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod push to oci without compression" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run mkdir /tmp/oci-busybox - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" oci:/tmp/oci-busybox:busybox - echo "$output" - [ "$status" -eq 0 ] - rm -rf /tmp/oci-busybox - run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] -} - -@test "kpod push without signatures" { - run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] - run mkdir /tmp/busybox - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS push --remove-signatures "$IMAGE" dir:/tmp/busybox - echo "$output" - [ "$status" -eq 0 ] - rm -rf /tmp/busybox - run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE" - echo "$output" - [ "$status" -eq 0 ] -} diff --git a/test/kpod_rename.bats b/test/kpod_rename.bats deleted file mode 100644 index 488449aa..00000000 --- a/test/kpod_rename.bats +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="redis:alpine" - -function teardown() { - cleanup_test -} - -@test "kpod rename successful" { - start_crio - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - pod_id="$output" - [ "$status" -eq 0 ] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - ctr_id="$output" - run ${KPOD_BINARY} $KPOD_OPTIONS 
rename "$ctr_id" "$NEW_NAME" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS inspect "$ctr_id" --format {{.Name}} - echo "$output" - [ "$status" -eq 0 ] - [ "$output" == "$NEW_NAME" ] - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/kpod_rm.bats b/test/kpod_rm.bats deleted file mode 100644 index 022e3efc..00000000 --- a/test/kpod_rm.bats +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" - -function teardown() { - cleanup_test -} - -@test "remove a stopped container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr stop --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rm "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_pods - stop_crio -} - -@test "refuse to remove a running container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rm "$ctr_id" - echo "$output" - [ "$status" -ne 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "remove a created container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} $KPOD_OPTIONS rm -f "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_pods - stop_crio -} - -@test "remove a running container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} $KPOD_OPTIONS rm -f "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_pods - stop_crio -} diff --git a/test/kpod_save.bats b/test/kpod_save.bats deleted file mode 100644 index 4f71ae78..00000000 --- a/test/kpod_save.bats +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" - -function teardown() { - cleanup_test -} - -@test "kpod save output flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] - rm -f alpine.tar -} - -@test "kpod save oci flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar --format oci-archive $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE - [ "$status" -eq 0 ] - rm -f alpine.tar - [ "$status" -eq 0 ] -} - -@test "kpod save using 
stdout" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save > alpine.tar $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] - rm -f alpine.tar -} - -@test "kpod save quiet flag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -q -o alpine.tar $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE - echo "$output" - [ "$status" -eq 0 ] - rm -f alpine.tar -} - -@test "kpod save non-existent image" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} save -o alpine.tar $IMAGE - echo "$output" - [ "$status" -ne 0 ] -} diff --git a/test/kpod_stats.bats b/test/kpod_stats.bats deleted file mode 100644 index a4b8e61e..00000000 --- a/test/kpod_stats.bats +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -function teardown() { - cleanup_test -} - -@test "stats single output" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} stats --no-stream "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "stats does not output stopped container" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} stats --no-stream - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "stats outputs stopped container with all flag" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} stats --no-stream --all - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "stats output only id" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} stats --no-stream --format {{.ID}} "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - # once ps is implemented, run ps -q and see if that equals the output from above - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "stats streaming output" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run timeout 5s bash -c "${KPOD_BINARY} ${KPOD_OPTIONS} stats --all" - echo "$output" - 
[ "$status" -eq 124 ] #124 is the status set by timeout when it has to kill the command at the end of the given time - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/kpod_stop.bats b/test/kpod_stop.bats deleted file mode 100644 index 08b4c933..00000000 --- a/test/kpod_stop.bats +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -function teardown() { - cleanup_test -} - -@test "stop a bogus container" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} stop foobar - echo "$output" - [ "$status" -eq 1 ] -} - -@test "stop a running container by id" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - id="$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} stop "$id" - cleanup_pods - stop_crio -} - -@test "stop a running container by name" { - start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - run ${KPOD_BINARY} ${KPOD_OPTIONS} stop "k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0" - cleanup_pods - stop_crio -} diff --git a/test/kpod_tag.bats b/test/kpod_tag.bats deleted file mode 100644 index 93109db5..00000000 --- a/test/kpod_tag.bats +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -IMAGE="alpine:latest" - -function teardown() { - cleanup_test -} - -@test "kpod tag with shortname:latest" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} tag $IMAGE foobar:latest - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} inspect foobar:latest - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi --force foobar:latest - [ "$status" -eq 0 ] -} - -@test "kpod tag with shortname" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} tag $IMAGE foobar - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} inspect foobar:latest - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi --force foobar:latest - [ "$status" -eq 0 ] -} - -@test "kpod tag with shortname:tag" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull $IMAGE - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} tag $IMAGE foobar:v - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} inspect foobar:v - echo "$output" - [ "$status" -eq 0 ] - run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi --force foobar:v - [ "$status" -eq 0 ] -} diff --git a/test/kpod_version.bats b/test/kpod_version.bats deleted file mode 100644 index e6c062b8..00000000 --- a/test/kpod_version.bats +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -function teardown() { - cleanup_test -} - -@test "kpod version test" { - run ${KPOD_BINARY} version - echo "$output" - [ "$status" -eq 0 ] -} diff --git a/test/kpod_wait.bats b/test/kpod_wait.bats deleted file mode 100644 index f1e02b7c..00000000 --- a/test/kpod_wait.bats +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bats - -load helpers - 
-IMAGE="redis:alpine" - -# Returns the POD ID -function pod_run_from_template(){ - #1=name, 2=uid, 3=namespace) { - NAME=$1 CUID=$2 NAMESPACE=$3 envsubst < ${TESTDATA}/template_sandbox_config.json > ${TESTDIR}/pod-${1}.json - crioctl pod run --config ${TESTDIR}/pod-${1}.json -} - -# Returns the container ID -function container_create_from_template() { - #1=name, 2=image, 3=command, 4=id) { - NAME=$1 IMAGE=$2 COMMAND=$3 envsubst < ${TESTDATA}/template_container_config.json > ${TESTDIR}/ctr-${1}.json - crioctl ctr create --config ${TESTDIR}/ctr-${1}.json --pod "$4" -} - -function container_start() { - #1=id - crioctl ctr start --id "$1" - -} -@test "wait on a bogus container" { - start_crio - run ${KPOD_BINARY} ${KPOD_OPTIONS} wait 12343 - echo $output - [ "$status" -eq 1 ] - stop_crio -} - -@test "wait on a stopped container" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest - echo $output - start_crio - pod_id=$( pod_run_from_template "test" "test" "test1-1" ) - echo $pod_id - ctr_id=$(container_create_from_template "test-CTR" "docker.io/library/busybox:latest" '["ls"]' "${pod_id}") - echo $ctr_id - container_start $ctr_id - run ${KPOD_BINARY} ${KPOD_OPTIONS} wait $ctr_id - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} - -@test "wait on a sleeping container" { - run ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest - echo $output - start_crio - pod_id=$( pod_run_from_template "test" "test" "test1-1" ) - echo $pod_id - ctr_id=$(container_create_from_template "test-CTR" "docker.io/library/busybox:latest" '["sleep", "5"]' "${pod_id}") - echo $ctr_id - run container_start $ctr_id - echo $output - run ${KPOD_BINARY} ${KPOD_OPTIONS} wait $ctr_id - echo $output - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/namespaces.bats b/test/namespaces.bats new file mode 100644 index 00000000..033cbab2 --- /dev/null +++ b/test/namespaces.bats @@ -0,0 +1,45 @@ +#!/usr/bin/env bats + +load helpers + +function teardown() { + cleanup_test +} + +function pid_namespace_test() { + start_crio + + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + [ "$status" -eq 0 ] + + run crictl exec --sync "$ctr_id" cat /proc/1/cmdline + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "${EXPECTED_INIT:-redis}" ]] + + run crictl stops "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl rms "$pod_id" + echo "$output" + [ "$status" -eq 0 ] + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "pod disable shared pid namespace" { + ENABLE_SHARED_PID_NAMESPACE=false pid_namespace_test +} + +@test "pod enable shared pid namespace" { + ENABLE_SHARED_PID_NAMESPACE=true EXPECTED_INIT=pause pid_namespace_test +} diff --git a/test/network.bats b/test/network.bats index eef4bbe0..1ed95ce1 100644 --- a/test/network.bats +++ b/test/network.bats @@ -7,62 +7,63 @@ function teardown() { cleanup_pods stop_crio rm -f /var/lib/cni/networks/crionet_test_args/* + chmod 0755 $CONMON_BINARY cleanup_test } @test "ensure correct hostname" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run 
crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" sh -c "hostname" + run crictl exec --sync "$ctr_id" sh -c "hostname" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "crioctl_host" ]] - run crioctl ctr execsync --id "$ctr_id" sh -c "echo \$HOSTNAME" + [[ "$output" =~ "crictl_host" ]] + run crictl exec --sync "$ctr_id" sh -c "echo \$HOSTNAME" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "crioctl_host" ]] - run crioctl ctr execsync --id "$ctr_id" sh -c "cat /etc/hostname" + [[ "$output" =~ "crictl_host" ]] + run crictl exec --sync "$ctr_id" sh -c "cat /etc/hostname" echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "crioctl_host" ]] + [[ "$output" =~ "crictl_host" ]] } @test "ensure correct hostname for hostnetwork:true" { start_crio hostnetworkconfig=$(cat "$TESTDATA"/sandbox_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["linux"]["security_context"]["namespace_options"]["host_network"] = True; obj["annotations"] = {}; obj["hostname"] = ""; json.dump(obj, sys.stdout)') echo "$hostnetworkconfig" > "$TESTDIR"/sandbox_hostnetwork_config.json - run crioctl pod run --config "$TESTDIR"/sandbox_hostnetwork_config.json + run crictl runs "$TESTDIR"/sandbox_hostnetwork_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDIR"/sandbox_hostnetwork_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" sh -c "hostname" + run crictl exec --sync "$ctr_id" sh -c "hostname" echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "$HOSTNAME" ]] - run crioctl ctr execsync --id "$ctr_id" sh -c "echo \$HOSTNAME" + run crictl exec --sync "$ctr_id" sh -c "echo \$HOSTNAME" echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "$HOSTNAME" ]] - run crioctl ctr execsync --id "$ctr_id" sh -c "cat /etc/hostname" + run crictl exec --sync "$ctr_id" sh -c "cat /etc/hostname" echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "$HOSTNAME" ]] @@ -70,27 +71,28 @@ function teardown() { @test "Check for valid pod netns CIDR" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" check_pod_cidr $ctr_id + } @test "Ping pod from the host" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" @@ -100,36 +102,37 @@ function teardown() { @test "Ping pod from another pod" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs 
"$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod1_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod1_id" + run crictl create "$pod1_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr1_id="$output" temp_sandbox_conf cni_test - run crioctl pod run --config "$TESTDIR"/sandbox_config_cni_test.json + run crictl runs "$TESTDIR"/sandbox_config_cni_test.json echo "$output" [ "$status" -eq 0 ] pod2_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod2_id" + run crictl create "$pod2_id" "$TESTDATA"/container_redis.json "$TESTDIR"/sandbox_config_cni_test.json echo "$output" [ "$status" -eq 0 ] ctr2_id="$output" ping_pod_from_pod $ctr1_id $ctr2_id - [ "$status" -eq 0 ] ping_pod_from_pod $ctr2_id $ctr1_id - [ "$status" -eq 0 ] } @test "Ensure correct CNI plugin namespace/name/container-id arguments" { + if [[ ! -e "$CRIO_CNI_PLUGIN"/bridge-custom ]]; then + skip "bridge-custom plugin not available" + fi start_crio "" "" "" "prepare_plugin_test_args_network_conf" - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json [ "$status" -eq 0 ] . /tmp/plugin_test_args.out @@ -144,7 +147,7 @@ function teardown() { @test "Connect to pod hostport from the host" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config_hostport.json + run crictl runs "$TESTDATA"/sandbox_config_hostport.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" @@ -152,29 +155,35 @@ function teardown() { get_host_ip echo $host_ip - run crioctl ctr create --config "$TESTDATA"/container_config_hostport.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config_hostport.json "$TESTDATA"/sandbox_config_hostport.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] run nc -w 5 $host_ip 4888 "$TESTDIR"/sandbox_wrong_cgroup_parent.json start_crio - run crioctl pod run --config "$TESTDIR"/sandbox_wrong_cgroup_parent.json + run crictl runs "$TESTDIR"/sandbox_wrong_cgroup_parent.json echo "$output" [ "$status" -eq 1 ] @@ -350,7 +347,7 @@ function teardown() { echo "$cgroup_parent_config" > "$TESTDIR"/sandbox_systemd_cgroup_parent.json start_crio - run crioctl pod run --config "$TESTDIR"/sandbox_systemd_cgroup_parent.json + run crictl runs "$TESTDIR"/sandbox_systemd_cgroup_parent.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" diff --git a/test/restore.bats b/test/restore.bats index 264096ed..09f4f2b2 100644 --- a/test/restore.bats +++ b/test/restore.bats @@ -8,69 +8,71 @@ function teardown() { @test "crio restore" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod list --id "$pod_id" + run crictl sandboxes --quiet --id "$pod_id" echo "$output" [ "$status" -eq 0 ] pod_list_info="$output" - run crioctl pod status --id "$pod_id" + run crictl inspects "$pod_id" echo "$output" [ "$status" -eq 0 ] - pod_status_info="$output" + pod_status_info=`echo "$output" | grep Status` - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr list --id "$ctr_id" + run 
crictl ps --quiet --id "$ctr_id" echo "$output" [ "$status" -eq 0 ] ctr_list_info="$output" - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] - ctr_status_info="$output" + ctr_status_info=`echo "$output" | grep State` stop_crio start_crio - run crioctl pod list + run crictl sandboxes --quiet echo "$output" [ "$status" -eq 0 ] [[ "${output}" != "" ]] - [[ "${output}" =~ "${pod_id}" ]] + [[ "${output}" == "${pod_id}" ]] - run crioctl pod list --id "$pod_id" + run crictl sandboxes --quiet --id "$pod_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" == "${pod_list_info}" ]] - run crioctl pod status --id "$pod_id" + run crictl inspects "$pod_id" echo "$output" [ "$status" -eq 0 ] + output=`echo "$output" | grep Status` [[ "${output}" == "${pod_status_info}" ]] - run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] [[ "${output}" != "" ]] - [[ "${output}" =~ "${ctr_id}" ]] + [[ "${output}" == "${ctr_id}" ]] - run crioctl ctr list --id "$ctr_id" + run crictl ps --quiet --id "$ctr_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" == "${ctr_list_info}" ]] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] + output=`echo "$output" | grep State` [[ "${output}" == "${ctr_status_info}" ]] cleanup_ctrs @@ -80,12 +82,12 @@ function teardown() { @test "crio restore with bad state and pod stopped" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -96,7 +98,7 @@ function teardown() { start_crio - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -106,17 +108,17 @@ function teardown() { @test "crio restore with bad state and ctr stopped" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -127,7 +129,7 @@ function teardown() { start_crio - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -138,21 +140,21 @@ function teardown() { @test "crio restore with bad state and ctr removed" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 0 ] - run crioctl ctr remove --id "$ctr_id" + run crictl rm "$ctr_id" echo "$output" [ "$status" -eq 0 ] @@ -163,7 +165,7 @@ function teardown() { start_crio - run crioctl ctr stop --id "$ctr_id" + run crictl stop "$ctr_id" echo "$output" [ "$status" -eq 1 ] [[ "${output}" =~ "not found" ]] @@ -175,16 
+177,16 @@ function teardown() { @test "crio restore with bad state and pod removed" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -195,7 +197,7 @@ function teardown() { start_crio - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] @@ -205,22 +207,22 @@ function teardown() { @test "crio restore with bad state" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config.json + run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl pod status --id "$pod_id" + run crictl inspects "$pod_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" =~ "SANDBOX_READY" ]] - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_config.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" =~ "CONTAINER_CREATED" ]] @@ -231,33 +233,34 @@ function teardown() { for i in $("$RUNTIME" list -q | xargs); do "$RUNTIME" delete -f $i; done start_crio - run crioctl pod list + run crictl sandboxes --quiet echo "$output" [ "$status" -eq 0 ] [[ "${output}" != "" ]] [[ "${output}" =~ "${pod_id}" ]] - run crioctl pod status --id "$pod_id" + run crictl inspects "$pod_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" =~ "SANDBOX_NOTREADY" ]] - run crioctl ctr list + run crictl ps --quiet echo "$output" [ "$status" -eq 0 ] [[ "${output}" != "" ]] [[ "${output}" =~ "${ctr_id}" ]] - run crioctl ctr status --id "$ctr_id" + run crictl inspect "$ctr_id" echo "$output" [ "$status" -eq 0 ] [[ "${output}" =~ "CONTAINER_EXITED" ]] - [[ "${output}" =~ "Exit Code: 255" ]] + # TODO: maybe cri-tools should display Exit Code +#[[ "${output}" =~ "Exit Code: 255" ]] - run crioctl pod stop --id "$pod_id" + run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] - run crioctl pod remove --id "$pod_id" + run crictl rms "$pod_id" echo "$output" [ "$status" -eq 0 ] diff --git a/test/runtimeversion.bats b/test/runtimeversion.bats index f0d2a436..81d0b531 100644 --- a/test/runtimeversion.bats +++ b/test/runtimeversion.bats @@ -6,9 +6,9 @@ function teardown() { cleanup_test } -@test "crioctl runtimeversion" { +@test "crictl runtimeversion" { start_crio - run crioctl runtimeversion + run crictl info echo "$output" [ "$status" -eq 0 ] stop_crio diff --git a/test/seccomp.bats b/test/seccomp.bats deleted file mode 100644 index b77a7f8c..00000000 --- a/test/seccomp.bats +++ /dev/null @@ -1,368 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -function teardown() { - cleanup_test -} - -# 1. test running with ctr unconfined -# test that we can run with a syscall which would be otherwise blocked -@test "ctr seccomp profiles unconfined" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled."
- fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/testname": "unconfined"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp1.json - run crioctl pod run --name seccomp1 --config "$TESTDIR"/seccomp1.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --name testname --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . - echo "$output" - [ "$status" -eq 0 ] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 2. test running with ctr runtime/default -# test that we cannot run with a syscall blocked by the default seccomp profile -@test "ctr seccomp profiles runtime/default" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/testname2": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp2.json - run crioctl pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . - echo "$output" - [ "$status" -eq 0 ] - [[ "$output" =~ "Exit code: 1" ]] - [[ "$output" =~ "Operation not permitted" ]] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 3. test running with ctr wrong profile name -@test "ctr seccomp profiles wrong profile name" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/testname3": "notgood"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp3.json - run crioctl pod run --name seccomp3 --config "$TESTDIR"/seccomp3.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --name testname3 --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -ne 0 ] - [[ "$output" =~ "unknown seccomp profile option:" ]] - [[ "$output" =~ "notgood" ]] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# TODO(runcom): need https://issues.k8s.io/36997 -# 4. 
test running with ctr localhost/profile_name -@test "ctr seccomp profiles localhost/profile_name" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - #sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - #sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - #sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - #start_crio "$TESTDIR"/seccomp_profile1.json - - skip "need https://issues.k8s.io/36997" -} - -# 5. test running with unknown ctr profile falls back to pod profile -# unknown ctr -> unconfined -# pod -> runtime/default -# result: fail chmod -@test "ctr seccomp profiles falls back to pod profile" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/redhat\.test\.crio-seccomp2-1-testname2-0-not-exists": "unconfined", "seccomp\.security\.alpha\.kubernetes\.io\/pod": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp5.json - run crioctl pod run --name seccomp5 --config "$TESTDIR"/seccomp5.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . - echo "$output" - [ "$status" -eq 0 ] - [[ "$output" =~ "Exit code: 1" ]] - [[ "$output" =~ "Operation not permitted" ]] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 6. test running with unknown ctr profile and no pod, falls back to unconfined -# unknown ctr -> runtime/default -# pod -> NO -# result: success, running unconfined -@test "ctr seccomp profiles falls back to unconfined" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/redhat\.test\.crio-seccomp6-1-testname6-0-not-exists": "runtime-default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp6.json - run crioctl pod run --name seccomp6 --config "$TESTDIR"/seccomp6.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --name testname6 --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 .
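Tests 5 and 6 above encode the profile-resolution order this suite relies on: a matching container-level `container.seccomp.security.alpha.kubernetes.io/<container-name>` annotation wins; when the container key does not match, the pod-level `seccomp.security.alpha.kubernetes.io/pod` annotation applies; and with neither in effect the container runs unconfined. The restricted profile these tests start CRI-O with is built by stripping the chmod family from the stock whitelist; a condensed equivalent of the three sed calls, assuming seccomp.json whitelists chmod, fchmod and fchmodat:

```
# Remove the three chmod-family syscalls from the default whitelist so
# that `chmod 777 .` inside a confined container fails with EPERM.
sed -e 's/"chmod",//' -e 's/"fchmod",//' -e 's/"fchmodat",//g' \
    seccomp.json > seccomp_restricted.json
```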
- echo "$output" - [ "$status" -eq 0 ] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 1. test running with pod unconfined -# test that we can run with a syscall which would be otherwise blocked -@test "pod seccomp profiles unconfined" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"seccomp\.security\.alpha\.kubernetes\.io\/pod": "unconfined"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp1.json - run crioctl pod run --name seccomp1 --config "$TESTDIR"/seccomp1.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . - echo "$output" - [ "$status" -eq 0 ] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 2. test running with pod runtime/default -# test that we cannot run with a syscall blocked by the default seccomp profile -@test "pod seccomp profiles runtime/default" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"seccomp\.security\.alpha\.kubernetes\.io\/pod": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp2.json - run crioctl pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . - echo "$output" - [ "$status" -eq 0 ] - [[ "$output" =~ "Exit code: 1" ]] - [[ "$output" =~ "Operation not permitted" ]] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# 3. test running with pod wrong profile name -@test "pod seccomp profiles wrong profile name" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - # 3. 
test running with pod wrong profile name - sed -e 's/%VALUE%/,"seccomp\.security\.alpha\.kubernetes\.io\/pod": "notgood"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp3.json - run crioctl pod run --name seccomp3 --config "$TESTDIR"/seccomp3.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id" - echo "$output" - [ "$status" -ne 0 ] - [[ "$output" =~ "unknown seccomp profile option:" ]] - [[ "$output" =~ "notgood" ]] - - cleanup_ctrs - cleanup_pods - stop_crio -} - -# TODO(runcom): need https://issues.k8s.io/36997 -# 4. test running with pod localhost/profile_name -@test "pod seccomp profiles localhost/profile_name" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - #sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - #sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - #sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - #start_crio "$TESTDIR"/seccomp_profile1.json - - skip "need https://issues.k8s.io/36997" -} - -# test running with ctr docker/default -# test that we cannot run with a syscall blocked by the default seccomp profile -@test "ctr seccomp profiles docker/default" { - # this test requires seccomp, so skip this test if seccomp is not enabled. - enabled=$(is_seccomp_enabled) - if [[ "$enabled" -eq 0 ]]; then - skip "skip this test since seccomp is not enabled." - fi - - sed -e 's/"chmod",//' "$CRIO_ROOT"/cri-o/seccomp.json > "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json - sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json - - start_crio "$TESTDIR"/seccomp_profile1.json - - sed -e 's/%VALUE%/,"container\.seccomp\.security\.alpha\.kubernetes\.io\/testname2": "docker\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp2.json - run crioctl pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crioctl ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crioctl ctr start --id "$ctr_id" - echo "$output" - [ "$status" -eq 0 ] - run crioctl ctr execsync --id "$ctr_id" chmod 777 . 
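Every test in this file first consults `is_seccomp_enabled` from the shared helpers before proceeding; its implementation is not part of this diff. A hypothetical minimal version, assuming a kernel new enough (3.8+) to report a `Seccomp:` field in `/proc/self/status`, could look like:

```
# Hypothetical stand-in for the is_seccomp_enabled helper: print 1 when
# the kernel advertises seccomp support in /proc/self/status, else 0.
function is_seccomp_enabled() {
    if grep -q '^Seccomp:' /proc/self/status; then
        echo 1
    else
        echo 0
    fi
}
```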
- echo "$output" - [ "$status" -eq 0 ] - [[ "$output" =~ "Exit code: 1" ]] - [[ "$output" =~ "Operation not permitted" ]] - - cleanup_ctrs - cleanup_pods - stop_crio -} diff --git a/test/selinux.bats b/test/selinux.bats index 1617e554..b876afc1 100644 --- a/test/selinux.bats +++ b/test/selinux.bats @@ -8,15 +8,15 @@ function teardown() { @test "ctr termination reason Completed" { start_crio - run crioctl pod run --config "$TESTDATA"/sandbox_config_selinux.json + run crictl runs "$TESTDATA"/sandbox_config_selinux.json echo "$output" [ "$status" -eq 0 ] pod_id="$output" - run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config_selinux.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" - run crioctl ctr start --id "$ctr_id" + run crictl start "$ctr_id" echo "$output" [ "$status" -eq 0 ] diff --git a/test/testdata/README.md b/test/testdata/README.md index afc6b32f..780d5713 100644 --- a/test/testdata/README.md +++ b/test/testdata/README.md @@ -5,11 +5,11 @@ sudo ./crio In terminal 2: ``` -sudo ./crioctl runtimeversion +sudo ./crictl runtimeversion sudo rm -rf /var/lib/containers/storage/sandboxes/podsandbox1 -sudo ./crioctl pod run --config testdata/sandbox_config.json +sudo ./crictl runs testdata/sandbox_config.json sudo rm -rf /var/lib/containers/storage/containers/container1 -sudo ./crioctl container create --pod podsandbox1 --config testdata/container_config.json +sudo ./crictl create podsandbox1 testdata/container_config.json testdata/sandbox_config.json ``` diff --git a/test/testdata/container_config_resolvconf.json b/test/testdata/container_config_resolvconf.json index 52b77e08..80093db1 100644 --- a/test/testdata/container_config_resolvconf.json +++ b/test/testdata/container_config_resolvconf.json @@ -1,6 +1,6 @@ { "metadata": { - "name": "container1", + "name": "test-resolve", "attempt": 1 }, "image": { diff --git a/test/testdata/container_config_resolvconf_ro.json b/test/testdata/container_config_resolvconf_ro.json index 7e121c07..faa0f303 100644 --- a/test/testdata/container_config_resolvconf_ro.json +++ b/test/testdata/container_config_resolvconf_ro.json @@ -1,6 +1,6 @@ { "metadata": { - "name": "container1", + "name": "test-resolve-ro", "attempt": 1 }, "image": { diff --git a/test/testdata/container_config_seccomp.json b/test/testdata/container_config_seccomp.json index 582132b0..6097050a 100644 --- a/test/testdata/container_config_seccomp.json +++ b/test/testdata/container_config_seccomp.json @@ -6,13 +6,11 @@ "image": { "image": "redis:alpine" }, - "command": [ - "/bin/bash" - ], "args": [ - "/bin/chmod", "777", "." 
+ "docker-entrypoint.sh", + "redis-server" ], - "working_dir": "/", + "working_dir": "/data", "envs": [ { "key": "PATH", @@ -53,6 +51,7 @@ "oom_score_adj": 30 }, "security_context": { + "seccomp_profile_path": "%VALUE%", "capabilities": { "add_capabilities": [ "setuid", diff --git a/test/testdata/container_redis.json b/test/testdata/container_redis.json index 638aba4f..39a0865b 100644 --- a/test/testdata/container_redis.json +++ b/test/testdata/container_redis.json @@ -45,10 +45,13 @@ "tty": false, "linux": { "resources": { + "memory_limit_in_bytes": 209715200, "cpu_period": 10000, "cpu_quota": 20000, "cpu_shares": 512, - "oom_score_adj": 30 + "oom_score_adj": 30, + "cpuset_cpus": "0-1", + "cpuset_mems": "0" }, "security_context": { "capabilities": { diff --git a/test/testdata/container_redis_default_mounts.json b/test/testdata/container_redis_default_mounts.json new file mode 100644 index 00000000..dff3db5a --- /dev/null +++ b/test/testdata/container_redis_default_mounts.json @@ -0,0 +1,67 @@ +{ + "metadata": { + "name": "podsandbox1-redis" + }, + "image": { + "image": "redis:alpine" + }, + "args": [ + "docker-entrypoint.sh", + "redis-server" + ], + "mounts": [ + { + "container_path": "%CPATH%", + "host_path": "%HPATH%" + } + ], + "working_dir": "/data", + "envs": [ + { + "key": "PATH", + "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + }, + { + "key": "TERM", + "value": "xterm" + }, + { + "key": "REDIS_VERSION", + "value": "3.2.3" + }, + { + "key": "REDIS_DOWNLOAD_URL", + "value": "http://download.redis.io/releases/redis-3.2.3.tar.gz" + }, + { + "key": "REDIS_DOWNLOAD_SHA1", + "value": "92d6d93ef2efc91e595c8bf578bf72baff397507" + } + ], + "labels": { + "tier": "backend" + }, + "annotations": { + "pod": "podsandbox1" + }, + "readonly_rootfs": false, + "log_path": "", + "stdin": false, + "stdin_once": false, + "tty": false, + "linux": { + "resources": { + "cpu_period": 10000, + "cpu_quota": 20000, + "cpu_shares": 512, + "oom_score_adj": 30 + }, + "security_context": { + "capabilities": { + "add_capabilities": [ + "sys_admin" + ] + } + } + } +} diff --git a/test/testdata/container_redis_env_custom.json b/test/testdata/container_redis_env_custom.json new file mode 100644 index 00000000..3ec41001 --- /dev/null +++ b/test/testdata/container_redis_env_custom.json @@ -0,0 +1,62 @@ +{ + "metadata": { + "name": "podsandbox1-redis" + }, + "image": { + "image": "redis:alpine" + }, + "args": [ + "docker-entrypoint.sh", + "redis-server" + ], + "working_dir": "/data", + "envs": [ + { + "key": "PATH", + "value": "/acustompathinpath:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + }, + { + "key": "TERM", + "value": "xterm" + }, + { + "key": "REDIS_VERSION", + "value": "3.2.3" + }, + { + "key": "REDIS_DOWNLOAD_URL", + "value": "http://download.redis.io/releases/redis-3.2.3.tar.gz" + }, + { + "key": "REDIS_DOWNLOAD_SHA1", + "value": "92d6d93ef2efc91e595c8bf578bf72baff397507" + } + ], + "labels": { + "tier": "backend" + }, + "annotations": { + "pod": "podsandbox1" + }, + "readonly_rootfs": false, + "log_path": "", + "stdin": false, + "stdin_once": false, + "tty": false, + "linux": { + "resources": { + "memory_limit_in_bytes": 209715200, + "cpu_period": 10000, + "cpu_quota": 20000, + "cpu_shares": 512, + "oom_score_adj": 30 + }, + "security_context": { + "capabilities": { + "add_capabilities": [ + "sys_admin" + ] + } + } + } +} diff --git a/test/testdata/sandbox1_config.json b/test/testdata/sandbox1_config.json new file mode 100644 index 00000000..23b9a67e --- 
/dev/null +++ b/test/testdata/sandbox1_config.json @@ -0,0 +1,51 @@ +{ + "metadata": { + "name": "podsandbox1", + "uid": "redhat-test-crio-1", + "namespace": "redhat.test.crio", + "attempt": 1 + }, + "hostname": "crictl_host", + "log_directory": "", + "dns_config": { + "searches": [ + "8.8.8.8" + ] + }, + "port_mappings": [], + "resources": { + "cpu": { + "limits": 3, + "requests": 2 + }, + "memory": { + "limits": 50000000, + "requests": 2000000 + } + }, + "labels": { + "name": "podsandbox1", + "group": "test", + "version": "v1.0.0" + }, + "annotations": { + "owner": "hmeng", + "security.alpha.kubernetes.io/seccomp/pod": "unconfined" + }, + "linux": { + "cgroup_parent": "/Burstable/pod_123-456", + "security_context": { + "namespace_options": { + "host_network": false, + "host_pid": false, + "host_ipc": false + }, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + } + } + } +} diff --git a/test/testdata/sandbox2_config.json b/test/testdata/sandbox2_config.json new file mode 100644 index 00000000..7f76e628 --- /dev/null +++ b/test/testdata/sandbox2_config.json @@ -0,0 +1,51 @@ +{ + "metadata": { + "name": "podsandbox2", + "uid": "redhat-test-crio-2", + "namespace": "redhat.test.crio", + "attempt": 1 + }, + "hostname": "crictl_host", + "log_directory": "", + "dns_config": { + "searches": [ + "8.8.8.8" + ] + }, + "port_mappings": [], + "resources": { + "cpu": { + "limits": 3, + "requests": 2 + }, + "memory": { + "limits": 50000000, + "requests": 2000000 + } + }, + "labels": { + "name": "podsandbox2", + "group": "test", + "version": "v1.0.0" + }, + "annotations": { + "owner": "hmeng", + "security.alpha.kubernetes.io/seccomp/pod": "unconfined" + }, + "linux": { + "cgroup_parent": "/Burstable/pod_123-456", + "security_context": { + "namespace_options": { + "host_network": false, + "host_pid": false, + "host_ipc": false + }, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + } + } + } +} diff --git a/test/testdata/sandbox3_config.json b/test/testdata/sandbox3_config.json new file mode 100644 index 00000000..8b15a422 --- /dev/null +++ b/test/testdata/sandbox3_config.json @@ -0,0 +1,51 @@ +{ + "metadata": { + "name": "podsandbox3", + "uid": "redhat-test-crio-3", + "namespace": "redhat.test.crio", + "attempt": 1 + }, + "hostname": "crictl_host", + "log_directory": "", + "dns_config": { + "searches": [ + "8.8.8.8" + ] + }, + "port_mappings": [], + "resources": { + "cpu": { + "limits": 3, + "requests": 2 + }, + "memory": { + "limits": 50000000, + "requests": 2000000 + } + }, + "labels": { + "name": "podsandbox3", + "group": "test", + "version": "v1.1.0" + }, + "annotations": { + "owner": "hmeng", + "security.alpha.kubernetes.io/seccomp/pod": "unconfined" + }, + "linux": { + "cgroup_parent": "/Burstable/pod_123-456", + "security_context": { + "namespace_options": { + "host_network": false, + "host_pid": false, + "host_ipc": false + }, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + } + } + } +} diff --git a/test/testdata/sandbox_config.json b/test/testdata/sandbox_config.json index 57e211bd..c424f748 100644 --- a/test/testdata/sandbox_config.json +++ b/test/testdata/sandbox_config.json @@ -5,7 +5,7 @@ "namespace": "redhat.test.crio", "attempt": 1 }, - "hostname": "crioctl_host", + "hostname": "crictl_host", "log_directory": "", "dns_config": { "searches": [ @@ -28,8 +28,6 @@ }, "annotations": { 
"owner": "hmeng", - "security.alpha.kubernetes.io/sysctls": "kernel.shm_rmid_forced=1,net.ipv4.ip_local_port_range=1024 65000", - "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" , "security.alpha.kubernetes.io/seccomp/pod": "unconfined" }, "linux": { diff --git a/test/testdata/sandbox_config_hostnet.json b/test/testdata/sandbox_config_hostnet.json index 99a7560d..a035e56a 100644 --- a/test/testdata/sandbox_config_hostnet.json +++ b/test/testdata/sandbox_config_hostnet.json @@ -5,7 +5,7 @@ "namespace": "redhat.test.crio", "attempt": 1 }, - "hostname": "crioctl_host", + "hostname": "crictl_host", "log_directory": "", "dns_options": { "servers": [ @@ -32,7 +32,6 @@ }, "annotations": { "owner": "hmeng", - "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" , "security.alpha.kubernetes.io/seccomp/pod": "unconfined" }, "linux": { diff --git a/test/testdata/sandbox_config_hostport.json b/test/testdata/sandbox_config_hostport.json index 5feda866..0d39dcc4 100644 --- a/test/testdata/sandbox_config_hostport.json +++ b/test/testdata/sandbox_config_hostport.json @@ -5,7 +5,7 @@ "namespace": "redhat.test.crio", "attempt": 1 }, - "hostname": "crioctl_host", + "hostname": "crictl_host", "log_directory": "", "dns_options": { "servers": [ @@ -38,8 +38,6 @@ }, "annotations": { "owner": "hmeng", - "security.alpha.kubernetes.io/sysctls": "kernel.shm_rmid_forced=1,net.ipv4.ip_local_port_range=1024 65000", - "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" , "security.alpha.kubernetes.io/seccomp/pod": "unconfined" }, "linux": { diff --git a/test/testdata/sandbox_config_seccomp.json b/test/testdata/sandbox_config_seccomp.json index 8e440b16..702cfc3b 100644 --- a/test/testdata/sandbox_config_seccomp.json +++ b/test/testdata/sandbox_config_seccomp.json @@ -5,7 +5,7 @@ "namespace": "redhat.test.crio", "attempt": 1 }, - "hostname": "crioctl_host", + "hostname": "crictl_host", "log_directory": "", "dns_options": { "servers": [ @@ -32,11 +32,11 @@ }, "annotations": { "owner": "hmeng" - %VALUE% }, "linux": { "cgroup_parent": "/Burstable/pod_123-456", "security_context": { + "seccomp_profile_path": "%VALUE%", "namespace_options": { "host_network": false, "host_pid": false, diff --git a/test/testdata/sandbox_config_selinux.json b/test/testdata/sandbox_config_selinux.json index 916a10ec..57cecacd 100644 --- a/test/testdata/sandbox_config_selinux.json +++ b/test/testdata/sandbox_config_selinux.json @@ -5,7 +5,7 @@ "namespace": "redhat.test.crio", "attempt": 1 }, - "hostname": "crioctl_host", + "hostname": "crictl_host", "log_directory": "", "dns_config": { "searches": [ @@ -28,8 +28,6 @@ }, "annotations": { "owner": "hmeng", - "security.alpha.kubernetes.io/sysctls": "kernel.shm_rmid_forced=1,net.ipv4.ip_local_port_range=1024 65000", - "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" , "security.alpha.kubernetes.io/seccomp/pod": "unconfined" }, "linux": { diff --git a/test/testdata/sandbox_config_sysctl.json b/test/testdata/sandbox_config_sysctl.json new file mode 100644 index 00000000..4146dc4f --- /dev/null +++ b/test/testdata/sandbox_config_sysctl.json @@ -0,0 +1,54 @@ +{ + "metadata": { + "name": "podsandbox1", + "uid": "redhat-test-crio", + "namespace": "redhat.test.crio", + "attempt": 1 + }, + "hostname": "crictl_host", + "log_directory": "", + "dns_config": { + "searches": [ + "8.8.8.8" + ] + }, + "port_mappings": [], + "resources": { + "cpu": { + "limits": 3, + "requests": 2 + }, + "memory": { + "limits": 50000000, + "requests": 2000000 + } + 
}, + "labels": { + "group": "test" + }, + "annotations": { + "owner": "hmeng", + "security.alpha.kubernetes.io/seccomp/pod": "unconfined" + }, + "linux": { + "sysctls": { + "kernel.shm_rmid_forced": "1", + "net.ipv4.ip_local_port_range": "1024 65000", + "kernel.msgmax": "8192" + }, + "cgroup_parent": "/Burstable/pod_123-456", + "security_context": { + "namespace_options": { + "host_network": false, + "host_pid": false, + "host_ipc": false + }, + "selinux_options": { + "user": "system_u", + "role": "system_r", + "type": "svirt_lxc_net_t", + "level": "s0:c4,c5" + } + } + } +} diff --git a/test/testdata/template_sandbox_config.json b/test/testdata/template_sandbox_config.json index f43ffb0d..c2f3f197 100644 --- a/test/testdata/template_sandbox_config.json +++ b/test/testdata/template_sandbox_config.json @@ -5,7 +5,7 @@ "namespace": "${NAMESPACE}", "attempt": 1 }, - "hostname": "crioctl_host", + "hostname": "crictl_host", "log_directory": "", "dns_config": { "searches": [ @@ -28,8 +28,6 @@ }, "annotations": { "owner": "hmeng", - "security.alpha.kubernetes.io/sysctls": "kernel.shm_rmid_forced=1,net.ipv4.ip_local_port_range=1024 65000", - "security.alpha.kubernetes.io/unsafe-sysctls": "kernel.msgmax=8192" , "security.alpha.kubernetes.io/seccomp/pod": "unconfined" }, "linux": { diff --git a/transfer.md b/transfer.md index 644c6953..8f6379b4 100644 --- a/transfer.md +++ b/transfer.md @@ -1,14 +1,14 @@ -# cri-o Usage Transfer +# CRI-O Usage Transfer -This document outlines useful information for ops and dev transfer as it relates to infrastructure that utilizes cri-o. +This document outlines useful information for ops and dev transfer as it relates to infrastructure that utilizes CRI-O. ## Operational Transfer ## Abstract -The `crio` daemon is intended to provide the [CRI](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md) socket needed for Kubernetes to use for automating deployment, scaling, and management of containerized applications (See the document for [configuring kubernetes to use cri-o](./kubernetes.md) for more information on that). -Therefore the `crioctl` command line is a client that interfaces to the same grpc socket as the kubernetes daemon would, for talking to the `crio` daemon. -In many ways `crioctl` is only as feature rich as the Kubernetes CRI requires. +The `crio` daemon is intended to provide the [CRI](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md) socket needed for Kubernetes to use for automating deployment, scaling, and management of containerized applications (See the document for [configuring kubernetes to use CRI-O](./kubernetes.md) for more information on that). +Therefore the [crictl](https://github.com/kubernetes-incubator/cri-tools) command line is a client that interfaces to the same grpc socket as the kubernetes daemon would, for talking to the `crio` daemon. +In many ways [crictl](https://github.com/kubernetes-incubator/cri-tools) is only as feature rich as the Kubernetes CRI requires. There are additional tools e.g. `kpod` and [`buildah`](https://github.com/projectatomic/buildah) that provide a feature rich set of commands for all operational needs in a Kubernetes environment. @@ -20,16 +20,16 @@ As well as some systemd helpers like `systemd-cgls` and `systemd-cgtop` are stil ## Equivalents For many troubleshooting and information collection steps, there may be an existing pattern. 
-Following provides equivalent with cri-o tools for gathering information or jumping into containers, for operational use. +The following table provides CRI-O equivalents for gathering information or jumping into containers, for operational use. | Existing Step | CRI-O (and friends) | | :---: | :---: | -| `docker exec` | [`crioctl ctr exec`](./docs/crio.8.md) | +| `docker exec` | [`crictl exec`](https://github.com/kubernetes-incubator/cri-tools/blob/master/docs/crictl.md) | | `docker info` | [`kpod info`](./docs/kpod-info.1.md) | | `docker inspect` | [`kpod inspect`](./docs/kpod-inspect.1.md) | | `docker logs` | [`kpod logs`](./docs/kpod-logs.1.md) | -| `docker ps` | [`crioctl ctr list`](./docs/crio.8.md) or [`runc list`](https://github.com/opencontainers/runc/blob/master/man/runc-list.8.md) | -| `docker stats` | [`kpod stats`](./docs/kpod-stats.1.md) or `crioctl ctr status`| +| `docker ps` | [`crictl ps`](https://github.com/kubernetes-incubator/cri-tools/blob/master/docs/crictl.md) or [`runc list`](https://github.com/opencontainers/runc/blob/master/man/runc-list.8.md) | +| `docker stats` | [`kpod stats`](./docs/kpod-stats.1.md) | If you were already using steps like `kubectl exec` (or `oc exec` on OpenShift), they will continue to function the same way. @@ -42,6 +42,7 @@ There are other equivalents for these tools | `docker attach` | [`kpod exec`](./docs/kpod-attach.1.md) ***| | `docker build` | [`buildah bud`](https://github.com/projectatomic/buildah/blob/master/docs/buildah-bud.md) | | `docker cp` | [`kpod mount`](./docs/kpod-cp.1.md) **** | +| `docker create` | [`kpod create`](./docs/kpod-create.1.md) | | `docker diff` | [`kpod diff`](./docs/kpod-diff.1.md) | | `docker export` | [`kpod export`](./docs/kpod-export.1.md) | | `docker history`| [`kpod history`](./docs/kpod-history.1.md)| @@ -57,12 +58,13 @@ There are other equivalents for these tools | `docker rename` | [`kpod rename`](./docs/kpod-rename.1.md) | | `docker rm` | [`kpod rm`](./docs/kpod-rm.1.md) | | `docker rmi` | [`kpod rmi`](./docs/kpod-rmi.1.md) | +| `docker run` | [`kpod run`](./docs/kpod-run.1.md) | | `docker save` | [`kpod save`](./docs/kpod-save.1.md) | | `docker stop` | [`kpod stop`](./docs/kpod-stop.1.md) | | `docker tag` | [`kpod tag`](./docs/kpod-tag.1.md) | | `docker unpause`| [`kpod unpause`](./docs/kpod-unpause.1.md)| | `docker version`| [`kpod version`](./docs/kpod-version.1.md)| -| `docker wait` | [`kpod wait`](./docs/kpod-wait.1.md)| +| `docker wait` | [`kpod wait`](./docs/kpod-wait.1.md) | *** Use `kpod exec` to enter a container and `kpod logs` to view the output of pid 1 of a container. **** Use mount to take advantage of the entire Linux tool chain rather than just cp. Read [`here`](./docs/kpod-cp.1.md) for more information. diff --git a/tutorial.md b/tutorial.md index 07a03964..5f89ccb8 100644 --- a/tutorial.md +++ b/tutorial.md @@ -1,10 +1,10 @@ -# cri-o Tutorial +# CRI-O Tutorial -This tutorial will walk you through the installation of [cri-o](https://github.com/kubernetes-incubator/cri-o), an Open Container Initiative-based implementation of [Kubernetes Container Runtime Interface](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/container-runtime-interface-v1.md), and the creation of [Redis](https://redis.io/) server running in a [Pod](http://kubernetes.io/docs/user-guide/pods/).
+This tutorial will walk you through the installation of [CRI-O](https://github.com/kubernetes-incubator/cri-o), an Open Container Initiative-based implementation of the [Kubernetes Container Runtime Interface](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/container-runtime-interface-v1.md), and the creation of a [Redis](https://redis.io/) server running in a [Pod](http://kubernetes.io/docs/user-guide/pods/). ## Prerequisites -A Linux machine is required to download and build the `cri-o` components and run the commands in this tutorial. +A Linux machine is required to download and build the CRI-O components and run the commands in this tutorial. Create a machine running Ubuntu 16.10: @@ -26,7 +26,7 @@ gcloud compute ssh cri-o This section will walk you through installing the following components: * crio - The implementation of the Kubernetes CRI, which manages Pods. -* crioctl - The crio client for testing. +* crictl - The CRI client for testing. * cni - The Container Network Interface * runc - The OCI runtime to launch the container @@ -36,17 +36,17 @@ This section will walk you through installing the following components: Download the `runc` release binary: ``` -wget https://github.com/opencontainers/runc/releases/download/v1.0.0-rc4/runc-linux-amd64 +wget https://github.com/opencontainers/runc/releases/download/v1.0.0-rc4/runc.amd64 ``` Set the executable bit and copy the `runc` binary into your PATH: ``` -chmod +x runc-linux-amd64 +chmod +x runc.amd64 ``` ``` -sudo mv runc-linux-amd64 /usr/bin/runc +sudo mv runc.amd64 /usr/bin/runc ``` Print the `runc` version: @@ -66,16 +66,16 @@ The `crio` project does not ship binary releases so you'll need to build it from #### Install the Go runtime and tool chain -Download the Go 1.7.4 binary release: +Download the Go 1.8.5 binary release: ``` -wget https://storage.googleapis.com/golang/go1.7.4.linux-amd64.tar.gz +wget https://storage.googleapis.com/golang/go1.8.5.linux-amd64.tar.gz ``` -Install Go 1.7.4: +Install Go 1.8.5: ``` -sudo tar -xvf go1.7.4.linux-amd64.tar.gz -C /usr/local/ +sudo tar -xvf go1.8.5.linux-amd64.tar.gz -C /usr/local/ ``` ``` @@ -90,20 +90,32 @@ export GOPATH=$HOME/go export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin ``` -At this point the Go 1.7.4 tool chain should be installed: +At this point the Go 1.8.5 tool chain should be installed: ``` go version ``` ``` -go version go1.7.4 linux/amd64 +go version go1.8.5 linux/amd64 +``` + +#### Get crictl + +``` +go get github.com/kubernetes-incubator/cri-tools/cmd/crictl ``` #### Build crio from source ``` -sudo apt-get install -y libglib2.0-dev libseccomp-dev libapparmor-dev +sudo apt-get update && sudo apt-get install -y libglib2.0-dev \ + libseccomp-dev \ + libapparmor-dev \ + libgpgme11-dev \ + libdevmapper-dev \ + make \ + git ``` ``` @@ -126,33 +138,10 @@ make sudo make install ``` -Output: +If you are installing for the first time, generate and install configuration files with: ``` -install -D -m 755 kpod /usr/local/bin/kpod -install -D -m 755 crio /usr/local/bin/crio -install -D -m 755 crioctl /usr/local/bin/crioctl -install -D -m 755 conmon/conmon /usr/local/libexec/crio/conmon -install -D -m 755 pause/pause /usr/local/libexec/crio/pause -install -d -m 755 /usr/local/share/man/man{1,5,8} -install -m 644 docs/kpod.1 docs/kpod-launch.1 -t /usr/local/share/man/man1 -install -m 644 docs/crio.conf.5 -t /usr/local/share/man/man5 -install -m 644 docs/crio.8 -t /usr/local/share/man/man8 -install -D -m 644 crio.conf /etc/crio/crio.conf -install -D -m
644 seccomp.json /etc/crio/seccomp.json -``` - -If you are installing for the first time, generate config as follows: - -``` -make install.config -``` - -Output: - -``` -install -D -m 644 crio.conf /etc/crio/crio.conf -install -D -m 644 seccomp.json /etc/crio/seccomp.json +sudo make install.config ``` #### Start the crio system daemon @@ -184,12 +173,19 @@ sudo systemctl start crio #### Ensure the crio service is running ``` -sudo crioctl runtimeversion +sudo crictl --runtime-endpoint /var/run/crio/crio.sock info ``` ``` -VersionResponse: Version: 0.1.0, RuntimeName: runc, RuntimeVersion: 1.0.0-rc4, RuntimeApiVersion: v1alpha1 +Version: 0.1.0 +RuntimeName: cri-o +RuntimeVersion: 1.9.0-dev +RuntimeApiVersion: v1alpha1 ``` +> To avoid passing --runtime-endpoint on every crictl invocation, you can +> export CRI_RUNTIME_ENDPOINT=/var/run/crio/crio.sock, or copy the +> crictl.yaml shipped in this repo to /etc/crictl.yaml. + ### CNI plugins This tutorial will use the latest version of `CNI` plugins from the master branch and build them from source. @@ -273,11 +269,25 @@ sudo sh -c 'cat >/etc/cni/net.d/99-loopback.conf <<-EOF EOF' ``` +Install the `skopeo-containers` package from `ppa:projectatomic/ppa`: + +``` +sudo add-apt-repository ppa:projectatomic/ppa +sudo apt-get update +sudo apt-get install skopeo-containers -y +``` + +Restart crio to apply the CNI config: + +``` +sudo systemctl restart crio +``` + At this point `CNI` is installed and configured to allocate IP addresses to containers from the `10.88.0.0/16` subnet. ## Pod Tutorial -Now that the `cri-o` components have been installed and configured we are ready to create a Pod. This section will walk you through launching a Redis server in a Pod. Once the Redis server is running we'll use telnet to verify it's working, then we'll stop the Redis server and clean up the Pod. +Now that the CRI-O components have been installed and configured, we are ready to create a Pod. This section will walk you through launching a Redis server in a Pod. Once the Redis server is running, we'll use telnet to verify it's working; then we'll stop the Redis server and clean up the Pod.
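+As a quick reference before the step-by-step walkthrough, the whole lifecycle covered below looks roughly like this (a condensed sketch using the same crictl commands and testdata configs shown in the following sections):
+
+```
+POD_ID=$(sudo crictl runp test/testdata/sandbox_config.json)
+sudo crictl pull redis:alpine
+CONTAINER_ID=$(sudo crictl create $POD_ID test/testdata/container_redis.json test/testdata/sandbox_config.json)
+sudo crictl start $CONTAINER_ID
+# ... verify the Redis server (e.g. with telnet), then tear everything down ...
+sudo crictl stop $CONTAINER_ID
+sudo crictl rm $CONTAINER_ID
+sudo crictl stops $POD_ID
+sudo crictl rms $POD_ID
+```
+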
### Creating a Pod @@ -290,15 +300,15 @@ cd $GOPATH/src/github.com/kubernetes-incubator/cri-o Next create the Pod and capture the Pod ID for later use: ``` -POD_ID=$(sudo crioctl pod run --config test/testdata/sandbox_config.json) +POD_ID=$(sudo crictl runp test/testdata/sandbox_config.json) ``` -> sudo crioctl pod run --config test/testdata/sandbox_config.json +> sudo crictl runp test/testdata/sandbox_config.json -Use the `crioctl` command to get the status of the Pod: +Use the `crictl` command to get the status of the Pod: ``` -sudo crioctl pod status --id $POD_ID +sudo crictl inspectp --output table $POD_ID ``` Output: @@ -324,27 +334,27 @@ Annotations: ### Create a Redis container inside the Pod -Use the `crioctl` command to pull the redis image, create a redis container from a container configuration and attach it to the Pod created earlier: +Use the `crictl` command to pull the redis image, create a redis container from a container configuration, and attach it to the Pod created earlier: ``` -sudo crioctl image pull redis:alpine -CONTAINER_ID=$(sudo crioctl ctr create --pod $POD_ID --config test/testdata/container_redis.json) +sudo crictl pull redis:alpine +CONTAINER_ID=$(sudo crictl create $POD_ID test/testdata/container_redis.json test/testdata/sandbox_config.json) ``` -> sudo crioctl ctr create --pod $POD_ID --config test/testdata/container_redis.json +> sudo crictl create $POD_ID test/testdata/container_redis.json test/testdata/sandbox_config.json -The `crioctl ctr create` command will take a few seconds to return because the redis container needs to be pulled. +The `crictl create` command will take a few seconds to return because the redis image needs to be pulled. Start the Redis container: ``` -sudo crioctl ctr start --id $CONTAINER_ID +sudo crictl start $CONTAINER_ID ``` Get the status for the Redis container: ``` -sudo crioctl ctr status --id $CONTAINER_ID +sudo crictl inspect $CONTAINER_ID ``` Output: @@ -401,25 +411,25 @@ sudo journalctl -u crio --no-pager ### Stop the redis container and delete the Pod ``` -sudo crioctl ctr stop --id $CONTAINER_ID +sudo crictl stop $CONTAINER_ID ``` ``` -sudo crioctl ctr remove --id $CONTAINER_ID +sudo crictl rm $CONTAINER_ID ``` ``` -sudo crioctl pod stop --id $POD_ID +sudo crictl stops $POD_ID ``` ``` -sudo crioctl pod remove --id $POD_ID +sudo crictl rms $POD_ID ``` ``` -sudo crioctl pod list +sudo crictl sandboxes ``` ``` -sudo crioctl ctr list +sudo crictl ps ``` diff --git a/types/types.go b/types/types.go index 63780143..cedc3abd 100644 --- a/types/types.go +++ b/types/types.go @@ -5,6 +5,7 @@ type ContainerInfo struct { Name string `json:"name"` Pid int `json:"pid"` Image string `json:"image"` + ImageRef string `json:"image_ref"` CreatedTime int64 `json:"created_time"` Labels map[string]string `json:"labels"` Annotations map[string]string `json:"annotations"` diff --git a/vendor.conf b/vendor.conf index 557701e9..acd066aa 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,18 +1,26 @@ -k8s.io/kubernetes v1.7.8 https://github.com/kubernetes/kubernetes -k8s.io/client-go release-4.0 https://github.com/kubernetes/client-go -k8s.io/apimachinery release-1.7 https://github.com/kubernetes/apimachinery -k8s.io/apiserver release-1.7 https://github.com/kubernetes/apiserver +k8s.io/kubernetes a48f11c2257d84b0bec89864025508b0ef626b4f https://github.com/kubernetes/kubernetes +k8s.io/client-go master https://github.com/kubernetes/client-go +k8s.io/apimachinery master https://github.com/kubernetes/apimachinery +k8s.io/apiserver master
https://github.com/kubernetes/apiserver +k8s.io/utils 4fe312863be2155a7b68acd2aff1c9221b24e68c https://github.com/kubernetes/utils +k8s.io/api master https://github.com/kubernetes/api +k8s.io/kube-openapi 39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1 https://github.com/kubernetes/kube-openapi +k8s.io/apiextensions-apiserver master https://github.com/kubernetes/apiextensions-apiserver # +github.com/googleapis/gnostic 0c5108395e2debce0d731cf0287ddf7242066aba +github.com/gregjones/httpcache 787624de3eb7bd915c329cba748687a3b22666a6 +github.com/json-iterator/go 1.0.0 +github.com/peterbourgon/diskv v2.0.1 github.com/sirupsen/logrus v1.0.0 -github.com/containers/image 57b257d128d6075ea3287991ee408d24c7bd2758 +github.com/containers/image 3d0304a02154dddc8f97cc833aa0861cea5e9ade github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/ostreedev/ostree-go master -github.com/containers/storage 64bf27465d0d1edd89e7a4ce49866fea01145782 +github.com/containers/storage 0d32dfce498e06c132c60dac945081bf44c22464 github.com/containernetworking/cni v0.4.0 google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go -github.com/opencontainers/selinux v1.0.0-rc1 +github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd github.com/opencontainers/go-digest v1.0.0-rc0 -github.com/opencontainers/runtime-tools d3f7e9e9e631c7e87552d67dc7c86de33c3fb68a +github.com/opencontainers/runtime-tools 625e2322645b151a7cbb93a8b42920933e72167f github.com/opencontainers/runc 45bde006ca8c90e089894508708bcf0e2cdf9e13 github.com/mrunalp/fileutils master github.com/vishvananda/netlink master @@ -56,11 +64,12 @@ github.com/ugorji/go d23841a297e5489e787e72fceffabf9d2994b52a github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7 golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 golang.org/x/net c427ad74c6d7a814201695e9ffde0c5d400a7674 -golang.org/x/sys 4cd6d1a821c7175768725b55ca82f14683a29ea4 +golang.org/x/sys 9aade4d3a3b7e6d876cd3823ad20ec45fc035402 golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 github.com/kr/pty v1.0.0 -github.com/gogo/protobuf v0.3 -github.com/golang/protobuf 748d386b5c1ea99658fd69fe9f03991ce86a90c1 +github.com/google/btree 7d79101e329e5a3adf994758c578dab82b90c017 +github.com/gogo/protobuf c0656edd0d9eab7c66d1eb0c568f9039345796f7 +github.com/golang/protobuf 4bd1920723d7b7c925de087aa32e2187708897f7 github.com/coreos/go-systemd v14 github.com/coreos/pkg v3 github.com/golang/groupcache b710c8433bd175204919eb38776e944233235d03 @@ -71,9 +80,9 @@ github.com/Microsoft/hcsshim 43f9725307998e09f2e3816c2c0c36dc98f0c982 github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46 github.com/emicklei/go-restful-swagger12 1.0.1 github.com/pkg/errors v0.8.0 -github.com/godbus/dbus v4.0.0 +github.com/godbus/dbus a389bdde4dd695d414e47b755e95e72b7826432c github.com/urfave/cli v1.20.0 -github.com/vbatts/tar-split v0.10.1 +github.com/vbatts/tar-split v0.10.2 github.com/renstrom/dedent v1.0.0 github.com/hpcloud/tail v1.0.0 gopkg.in/fsnotify.v1 v1.4.2 @@ -101,3 +110,9 @@ github.com/go-zoo/bone 031b4005dfe248ccba241a0c9de0f9e112fd6b7c github.com/soheilhy/cmux v0.1.3 github.com/hashicorp/go-multierror 83588e72410abfbe4df460eeb6f30841ae47d4c4 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 +github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac +github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 +github.com/pmezard/go-difflib v1.0.0 +github.com/xeipuuv/gojsonreference master 
+github.com/xeipuuv/gojsonschema master +github.com/xeipuuv/gojsonpointer master diff --git a/vendor/github.com/buger/goterm/README.md b/vendor/github.com/buger/goterm/README.md deleted file mode 100644 index 536b7b88..00000000 --- a/vendor/github.com/buger/goterm/README.md +++ /dev/null @@ -1,119 +0,0 @@ -## Description - -This library provides basic building blocks for building advanced console UIs. - -Initially created for [Gor](http://github.com/buger/gor). - -Full API documentation: http://godoc.org/github.com/buger/goterm - -## Basic usage - -Full screen console app, printing current time: - -```go -import ( - tm "github.com/buger/goterm" - "time" -) - -func main() { - tm.Clear() // Clear current screen - - for { - // By moving cursor to top-left position we ensure that console output - // will be overwritten each time, instead of adding new. - tm.MoveCursor(1,1) - - tm.Println("Current Time:", time.Now().Format(time.RFC1123)) - - tm.Flush() // Call it every time at the end of rendering - - time.Sleep(time.Second) - } -} -``` - -This can be seen in [examples/time_example.go](examples/time_example.go). To -run it yourself, go into your `$GOPATH/src/github.com/buger/goterm` directory -and run `go run ./examples/time_example.go` - - -Print red bold message on white background: - -```go -tm.Println(tm.Background(tm.Color(tm.Bold("Important header"), tm.RED), tm.WHITE)) -``` - - -Create box and move it to center of the screen: - -```go -tm.Clear() - -// Create Box with 30% width of current screen, and height of 20 lines -box := tm.NewBox(30|tm.PCT, 20, 0) - -// Add some content to the box -// Note that you can add ANY content, even tables -fmt.Fprint(box, "Some box content") - -// Move Box to approx center of the screen -tm.Print(tm.MoveTo(box.String(), 40|tm.PCT, 40|tm.PCT)) - -tm.Flush() -``` - -This can be found in [examples/box_example.go](examples/box_example.go). - -Draw table: - -```go -// Based on http://golang.org/pkg/text/tabwriter -totals := tm.NewTable(0, 10, 5, ' ', 0) -fmt.Fprintf(totals, "Time\tStarted\tActive\tFinished\n") -fmt.Fprintf(totals, "%s\t%d\t%d\t%d\n", "All", started, started-finished, finished) -tm.Println(totals) -tm.Flush() -``` - -This can be found in [examples/table_example.go](examples/table_example.go). - -## Line charts - -Chart example: - -![screen shot 2013-07-09 at 5 05 37 pm](https://f.cloud.github.com/assets/14009/767676/e3dd35aa-e887-11e2-9cd2-f6451eb26adc.png) - - -```go - import ( - tm "github.com/buger/goterm" - ) - - chart := tm.NewLineChart(100, 20) - - data := new(tm.DataTable) - data.addColumn("Time") - data.addColumn("Sin(x)") - data.addColumn("Cos(x+1)") - - for i := 0.1; i < 10; i += 0.1 { - data.addRow(i, math.Sin(i), math.Cos(i+1)) - } - - tm.Println(chart.Draw(data)) -``` - -This can be found in [examples/chart_example.go](examples/chart_example.go). - -Drawing 2 separate graphs in different scales. Each graph have its own Y axe. - -```go -chart.Flags = tm.DRAW_INDEPENDENT -``` - -Drawing graph with relative scale (Grapwh draw starting from min value instead of zero) - -```go -chart.Flags = tm.DRAW_RELATIVE -``` diff --git a/vendor/github.com/buger/goterm/box.go b/vendor/github.com/buger/goterm/box.go deleted file mode 100644 index 7df929d7..00000000 --- a/vendor/github.com/buger/goterm/box.go +++ /dev/null @@ -1,122 +0,0 @@ -package goterm - -import ( - "bytes" - "strings" -) - -const DEFAULT_BORDER = "- │ ┌ ┐ └ ┘" - -// Box allows you to create independent parts of screen, with its own buffer and borders. 
-// Can be used for creating modal windows -// -// Generates boxes likes this: -// ┌--------┐ -// │hello │ -// │world │ -// │ │ -// └--------┘ -// -type Box struct { - Buf *bytes.Buffer - - Width int - Height int - - // To get even padding: PaddingX ~= PaddingY*4 - PaddingX int - PaddingY int - - // Should contain 6 border pieces separated by spaces - // - // Example border: - // "- │ ┌ ┐ └ ┘" - Border string - - Flags int // Not used now -} - -// Create new Box. -// Width and height can be relative: -// -// // Create box with 50% with of current screen and 10 lines height -// box := tm.NewBox(50|tm.PCT, 10, 0) -// -func NewBox(width, height int, flags int) *Box { - width, height = GetXY(width, height) - - box := new(Box) - box.Buf = new(bytes.Buffer) - box.Width = width - box.Height = height - box.Border = DEFAULT_BORDER - box.PaddingX = 1 - box.PaddingY = 0 - box.Flags = flags - - return box -} - -func (b *Box) Write(p []byte) (int, error) { - return b.Buf.Write(p) -} - -// Render Box -func (b *Box) String() (out string) { - borders := strings.Split(b.Border, " ") - lines := strings.Split(b.Buf.String(), "\n") - - // Border + padding - prefix := borders[1] + strings.Repeat(" ", b.PaddingX) - suffix := strings.Repeat(" ", b.PaddingX) + borders[1] - - offset := b.PaddingY + 1 // 1 is border width - - // Content width without borders and padding - contentWidth := b.Width - (b.PaddingX+1)*2 - - for y := 0; y < b.Height; y++ { - var line string - - switch { - // Draw borders for first line - case y == 0: - line = borders[2] + strings.Repeat(borders[0], b.Width-2) + borders[3] - - // Draw borders for last line - case y == (b.Height - 1): - line = borders[4] + strings.Repeat(borders[0], b.Width-2) + borders[5] - - // Draw top and bottom padding - case y <= b.PaddingY || y >= (b.Height-b.PaddingY): - line = borders[1] + strings.Repeat(" ", b.Width-2) + borders[1] - - // Render content - default: - if len(lines) > y-offset { - line = lines[y-offset] - } else { - line = "" - } - - if len(line) > contentWidth-1 { - // If line is too large limit it - line = line[0:contentWidth] - } else { - // If line is too small enlarge it by adding spaces - line = line + strings.Repeat(" ", contentWidth-len(line)) - } - - line = prefix + line + suffix - } - - // Don't add newline for last element - if y != b.Height-1 { - line = line + "\n" - } - - out += line - } - - return out -} diff --git a/vendor/github.com/buger/goterm/plot.go b/vendor/github.com/buger/goterm/plot.go deleted file mode 100644 index 77b9fb09..00000000 --- a/vendor/github.com/buger/goterm/plot.go +++ /dev/null @@ -1,328 +0,0 @@ -package goterm - -import ( - "fmt" - "math" - "strings" -) - -const ( - AXIS_LEFT = iota - AXIS_RIGHT -) - -const ( - DRAW_INDEPENDENT = 1 << iota - DRAW_RELATIVE -) - -type DataTable struct { - columns []string - - rows [][]float64 -} - -func (d *DataTable) AddColumn(name string) { - d.columns = append(d.columns, name) -} - -func (d *DataTable) AddRow(elms ...float64) { - d.rows = append(d.rows, elms) -} - -type Chart interface { - Draw(data DataTable, flags int) string -} - -type LineChart struct { - Buf []string - chartBuf []string - - data *DataTable - - Width int - Height int - - chartHeight int - chartWidth int - - paddingX int - - paddingY int - - Flags int -} - -func genBuf(size int) []string { - buf := make([]string, size) - - for i := 0; i < size; i++ { - buf[i] = " " - } - - return buf -} - -// Format float -func ff(num interface{}) string { - return fmt.Sprintf("%.1f", num) -} - -func NewLineChart(width, 
height int) *LineChart { - chart := new(LineChart) - chart.Width = width - chart.Height = height - chart.Buf = genBuf(width * height) - - // axis lines + axies text - chart.paddingY = 2 - - return chart -} - -func (c *LineChart) DrawAxes(maxX, minX, maxY, minY float64, index int) { - side := AXIS_LEFT - - if c.Flags&DRAW_INDEPENDENT != 0 { - if index%2 == 0 { - side = AXIS_RIGHT - } - - c.DrawLine(c.paddingX-1, 1, c.Width-c.paddingX, 1, "-") - } else { - c.DrawLine(c.paddingX-1, 1, c.Width-1, 1, "-") - } - - if side == AXIS_LEFT { - c.DrawLine(c.paddingX-1, 1, c.paddingX-1, c.Height-1, "│") - } else { - c.DrawLine(c.Width-c.paddingX, 1, c.Width-c.paddingX, c.Height-1, "│") - } - - left := 0 - if side == AXIS_RIGHT { - left = c.Width - c.paddingX + 1 - } - - if c.Flags&DRAW_RELATIVE != 0 { - c.writeText(ff(minY), left, 1) - } else { - if minY > 0 { - c.writeText("0", left, 1) - } else { - c.writeText(ff(minY), left, 1) - } - } - - c.writeText(ff(maxY), left, c.Height-1) - - c.writeText(ff(minX), c.paddingX, 0) - - x_col := c.data.columns[0] - c.writeText(c.data.columns[0], c.Width/2-len(x_col)/2, 1) - - if c.Flags&DRAW_INDEPENDENT != 0 || len(c.data.columns) < 3 { - col := c.data.columns[index] - - for idx, char := range strings.Split(col, "") { - start_from := c.Height/2 + len(col)/2 - idx - - if side == AXIS_LEFT { - c.writeText(char, c.paddingX-1, start_from) - } else { - c.writeText(char, c.Width-c.paddingX, start_from) - } - } - } - - if c.Flags&DRAW_INDEPENDENT != 0 { - c.writeText(ff(maxX), c.Width-c.paddingX-len(ff(maxX)), 0) - } else { - c.writeText(ff(maxX), c.Width-len(ff(maxX)), 0) - } -} - -func (c *LineChart) writeText(text string, x, y int) { - coord := y*c.Width + x - - for idx, char := range strings.Split(text, "") { - c.Buf[coord+idx] = char - } -} - -func (c *LineChart) Draw(data *DataTable) (out string) { - var scaleY, scaleX float64 - - c.data = data - - if c.Flags&DRAW_INDEPENDENT != 0 && len(data.columns) > 3 { - fmt.Println("Error: Can't use DRAW_INDEPENDENT for more then 2 graphs") - return "" - } - - charts := len(data.columns) - 1 - - prevPoint := [2]int{-1, -1} - - maxX, minX, maxY, minY := getBoundaryValues(data, -1) - - c.paddingX = int(math.Max(float64(len(ff(minY))), float64(len(ff(maxY))))) + 1 - - c.chartHeight = c.Height - c.paddingY - - if c.Flags&DRAW_INDEPENDENT != 0 { - c.chartWidth = c.Width - 2*c.paddingX - } else { - c.chartWidth = c.Width - c.paddingX - 1 - } - - scaleX = float64(c.chartWidth) / (maxX - minX) - - if c.Flags&DRAW_RELATIVE != 0 || minY < 0 { - scaleY = float64(c.chartHeight) / (maxY - minY) - } else { - scaleY = float64(c.chartHeight) / maxY - } - - for i := 1; i < charts+1; i++ { - if c.Flags&DRAW_INDEPENDENT != 0 { - maxX, minX, maxY, minY = getBoundaryValues(data, i) - - scaleX = float64(c.chartWidth-1) / (maxX - minX) - scaleY = float64(c.chartHeight) / maxY - - if c.Flags&DRAW_RELATIVE != 0 || minY < 0 { - scaleY = float64(c.chartHeight) / (maxY - minY) - } - } - - symbol := Color("•", i) - - c_data := getChartData(data, i) - - for _, point := range c_data { - x := int((point[0]-minX)*scaleX) + c.paddingX - y := int((point[1])*scaleY) + c.paddingY - - if c.Flags&DRAW_RELATIVE != 0 || minY < 0 { - y = int((point[1]-minY)*scaleY) + c.paddingY - } - - if prevPoint[0] == -1 { - prevPoint[0] = x - prevPoint[1] = y - } - - if prevPoint[0] <= x { - c.DrawLine(prevPoint[0], prevPoint[1], x, y, symbol) - } - - prevPoint[0] = x - prevPoint[1] = y - } - - c.DrawAxes(maxX, minX, maxY, minY, i) - } - - for row := c.Height - 1; row >= 0; 
row-- { - out += strings.Join(c.Buf[row*c.Width:(row+1)*c.Width], "") + "\n" - } - - return -} - -func (c *LineChart) DrawLine(x0, y0, x1, y1 int, symbol string) { - drawLine(x0, y0, x1, y1, func(x, y int) { - coord := y*c.Width + x - - if coord > 0 && coord < len(c.Buf) { - c.Buf[coord] = symbol - } - }) -} - -func getBoundaryValues(data *DataTable, index int) (maxX, minX, maxY, minY float64) { - maxX = data.rows[0][0] - minX = data.rows[0][0] - maxY = data.rows[0][1] - minY = data.rows[0][1] - - for _, r := range data.rows { - maxX = math.Max(maxX, r[0]) - minX = math.Min(minX, r[0]) - - for idx, c := range r { - if idx > 0 { - if index == -1 || index == idx { - maxY = math.Max(maxY, c) - minY = math.Min(minY, c) - } - } - } - } - - if maxY > 0 { - maxY = maxY * 1.1 - } else { - maxY = maxY * 0.9 - } - - if minY > 0 { - minY = minY * 0.9 - } else { - minY = minY * 1.1 - } - - return -} - -// DataTable can contain data for multiple graphs, we need to extract only 1 -func getChartData(data *DataTable, index int) (out [][]float64) { - for _, r := range data.rows { - out = append(out, []float64{r[0], r[index]}) - } - - return -} - -// Algorithm for drawing line between two points -// -// http://en.wikipedia.org/wiki/Bresenham's_line_algorithm -func drawLine(x0, y0, x1, y1 int, plot func(int, int)) { - dx := x1 - x0 - if dx < 0 { - dx = -dx - } - dy := y1 - y0 - if dy < 0 { - dy = -dy - } - var sx, sy int - if x0 < x1 { - sx = 1 - } else { - sx = -1 - } - if y0 < y1 { - sy = 1 - } else { - sy = -1 - } - err := dx - dy - - for { - plot(x0, y0) - if x0 == x1 && y0 == y1 { - break - } - e2 := 2 * err - if e2 > -dy { - err -= dy - x0 += sx - } - if e2 < dx { - err += dx - y0 += sy - } - } -} diff --git a/vendor/github.com/buger/goterm/table.go b/vendor/github.com/buger/goterm/table.go deleted file mode 100644 index d8dae55c..00000000 --- a/vendor/github.com/buger/goterm/table.go +++ /dev/null @@ -1,34 +0,0 @@ -package goterm - -import ( - "bytes" - "text/tabwriter" -) - -// Tabwriter with own buffer: -// -// totals := tm.NewTable(0, 10, 5, ' ', 0) -// fmt.Fprintf(totals, "Time\tStarted\tActive\tFinished\n") -// fmt.Fprintf(totals, "%s\t%d\t%d\t%d\n", "All", started, started-finished, finished) -// tm.Println(totals) -// -// Based on http://golang.org/pkg/text/tabwriter -type Table struct { - tabwriter.Writer - - Buf *bytes.Buffer -} - -// Same as here http://golang.org/pkg/text/tabwriter/#Writer.Init -func NewTable(minwidth, tabwidth, padding int, padchar byte, flags uint) *Table { - tbl := new(Table) - tbl.Buf = new(bytes.Buffer) - tbl.Init(tbl.Buf, minwidth, tabwidth, padding, padchar, flags) - - return tbl -} - -func (t *Table) String() string { - t.Flush() - return t.Buf.String() -} diff --git a/vendor/github.com/buger/goterm/terminal.go b/vendor/github.com/buger/goterm/terminal.go deleted file mode 100644 index 6b45c78b..00000000 --- a/vendor/github.com/buger/goterm/terminal.go +++ /dev/null @@ -1,258 +0,0 @@ -// Provides basic bulding blocks for advanced console UI -// -// Coordinate system: -// -// 1/1---X----> -// | -// Y -// | -// v -// -// Documentation for ANSI codes: http://en.wikipedia.org/wiki/ANSI_escape_code#Colors -// -// Inspired by: http://www.darkcoding.net/software/pretty-command-line-console-output-on-unix-in-python-and-go-lang/ -package goterm - -import ( - "bufio" - "bytes" - "fmt" - "os" - "strings" -) - -// Reset all custom styles -const RESET = "\033[0m" - -// Reset to default color -const RESET_COLOR = "\033[32m" - -// Return curor to start of line and clean it -const 
RESET_LINE = "\r\033[K" - -// List of possible colors -const ( - BLACK = iota - RED - GREEN - YELLOW - BLUE - MAGENTA - CYAN - WHITE -) - -var Output *bufio.Writer = bufio.NewWriter(os.Stdout) - -func getColor(code int) string { - return fmt.Sprintf("\033[3%dm", code) -} - -func getBgColor(code int) string { - return fmt.Sprintf("\033[4%dm", code) -} - -// Set percent flag: num | PCT -// -// Check percent flag: num & PCT -// -// Reset percent flag: num & 0xFF -const shift = uint(^uint(0)>>63) << 4 -const PCT = 0x8000 << shift - -type winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -// Global screen buffer -// Its not recommented write to buffer dirrectly, use package Print,Printf,Println fucntions instead. -var Screen *bytes.Buffer = new(bytes.Buffer) - -// Get relative or absolute coorditantes -// To get relative, set PCT flag to number: -// -// // Get 10% of total width to `x` and 20 to y -// x, y = tm.GetXY(10|tm.PCT, 20) -// -func GetXY(x int, y int) (int, int) { - if y == -1 { - y = CurrentHeight() + 1 - } - - if x&PCT != 0 { - x = int((x & 0xFF) * Width() / 100) - } - - if y&PCT != 0 { - y = int((y & 0xFF) * Height() / 100) - } - - return x, y -} - -type sf func(int, string) string - -// Apply given transformation func for each line in string -func applyTransform(str string, transform sf) (out string) { - out = "" - - for idx, line := range strings.Split(str, "\n") { - out += transform(idx, line) - } - - return -} - -// Clear screen -func Clear() { - Output.WriteString("\033[2J") -} - -// Move cursor to given position -func MoveCursor(x int, y int) { - fmt.Fprintf(Screen, "\033[%d;%dH", x, y) -} - -// Move cursor up relative the current position -func MoveCursorUp(bias int) { - fmt.Fprintf(Screen, "\033[%dA", bias) -} - -// Move cursor down relative the current position -func MoveCursorDown(bias int) { - fmt.Fprintf(Screen, "\033[%dB", bias) -} - -// Move cursor forward relative the current position -func MoveCursorForward(bias int) { - fmt.Fprintf(Screen, "\033[%dC", bias) -} - -// Move cursor backward relative the current position -func MoveCursorBackward(bias int) { - fmt.Fprintf(Screen, "\033[%dD", bias) -} - -// Move string to possition -func MoveTo(str string, x int, y int) (out string) { - x, y = GetXY(x, y) - - return applyTransform(str, func(idx int, line string) string { - return fmt.Sprintf("\033[%d;%dH%s", y+idx, x, line) - }) -} - -// Return carrier to start of line -func ResetLine(str string) (out string) { - return applyTransform(str, func(idx int, line string) string { - return fmt.Sprintf(RESET_LINE, line) - }) -} - -// Make bold -func Bold(str string) string { - return applyTransform(str, func(idx int, line string) string { - return fmt.Sprintf("\033[1m%s\033[0m", line) - }) -} - -// Apply given color to string: -// -// tm.Color("RED STRING", tm.RED) -// -func Color(str string, color int) string { - return applyTransform(str, func(idx int, line string) string { - return fmt.Sprintf("%s%s%s", getColor(color), line, RESET) - }) -} - -func Highlight(str, substr string, color int) string { - hiSubstr := Color(substr, color) - return strings.Replace(str, substr, hiSubstr, -1) -} - -func HighlightRegion(str string, from, to, color int) string { - return str[:from] + Color(str[from:to], color) + str[to:] -} - -// Change background color of string: -// -// tm.Background("string", tm.RED) -// -func Background(str string, color int) string { - return applyTransform(str, func(idx int, line string) string { - return fmt.Sprintf("%s%s%s", 
getBgColor(color), line, RESET) - }) -} - -// Get console width -func Width() int { - ws, err := getWinsize() - - if err != nil { - return -1 - } - - return int(ws.Col) -} - -// Get console height -func Height() int { - ws, err := getWinsize() - if err != nil { - return -1 - } - return int(ws.Row) -} - -// Get current height. Line count in Screen buffer. -func CurrentHeight() int { - return strings.Count(Screen.String(), "\n") -} - -// Flush buffer and ensure that it will not overflow screen -func Flush() { - for idx, str := range strings.Split(Screen.String(), "\n") { - if idx > Height() { - return - } - - Output.WriteString(str + "\n") - } - - Output.Flush() - Screen.Reset() -} - -func Print(a ...interface{}) (n int, err error) { - return fmt.Fprint(Screen, a...) -} - -func Println(a ...interface{}) (n int, err error) { - return fmt.Fprintln(Screen, a...) -} - -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(Screen, format, a...) -} - -func Context(data string, idx, max int) string { - var start, end int - - if len(data[:idx]) < (max / 2) { - start = 0 - } else { - start = idx - max/2 - } - - if len(data)-idx < (max / 2) { - end = len(data) - 1 - } else { - end = idx + max/2 - } - - return data[start:end] -} diff --git a/vendor/github.com/buger/goterm/terminal_nosysioctl.go b/vendor/github.com/buger/goterm/terminal_nosysioctl.go deleted file mode 100644 index 69061500..00000000 --- a/vendor/github.com/buger/goterm/terminal_nosysioctl.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build windows plan9 solaris - -package goterm - -func getWinsize() (*winsize, error) { - ws := new(winsize) - - ws.Col = 80 - ws.Row = 24 - - return ws, nil -} diff --git a/vendor/github.com/buger/goterm/terminal_sysioctl.go b/vendor/github.com/buger/goterm/terminal_sysioctl.go deleted file mode 100644 index e98430fb..00000000 --- a/vendor/github.com/buger/goterm/terminal_sysioctl.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build !windows,!plan9,!solaris - -package goterm - -import ( - "fmt" - "os" - "runtime" - "syscall" - "unsafe" -) - -func getWinsize() (*winsize, error) { - ws := new(winsize) - - var _TIOCGWINSZ int64 - - switch runtime.GOOS { - case "linux": - _TIOCGWINSZ = 0x5413 - case "darwin": - _TIOCGWINSZ = 1074295912 - } - - r1, _, errno := syscall.Syscall(syscall.SYS_IOCTL, - uintptr(syscall.Stdin), - uintptr(_TIOCGWINSZ), - uintptr(unsafe.Pointer(ws)), - ) - - if int(r1) == -1 { - fmt.Println("Error:", os.NewSyscallError("GetWinsize", errno)) - return nil, os.NewSyscallError("GetWinsize", errno) - } - return ws, nil -} diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go index 590b3787..29065e03 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/copy/copy.go @@ -12,8 +12,6 @@ import ( "strings" "time" - pb "gopkg.in/cheggaaa/pb.v1" - "github.com/containers/image/image" "github.com/containers/image/pkg/compression" "github.com/containers/image/signature" @@ -22,6 +20,7 @@ import ( "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" + pb "gopkg.in/cheggaaa/pb.v1" ) type digestingReader struct { @@ -31,23 +30,6 @@ type digestingReader struct { validationFailed bool } -// imageCopier allows us to keep track of diffID values for blobs, and other -// data, that we're copying between images, and cache other information that -// might allow us to take some shortcuts -type imageCopier struct { - copiedBlobs 
map[digest.Digest]digest.Digest - cachedDiffIDs map[digest.Digest]digest.Digest - manifestUpdates *types.ManifestUpdateOptions - dest types.ImageDestination - src types.Image - rawSource types.ImageSource - diffIDsAreNeeded bool - canModifyManifest bool - reportWriter io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties -} - // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error // and set validationFailed to true if the source stream does not match expectedDigest. func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { @@ -86,6 +68,27 @@ func (d *digestingReader) Read(p []byte) (int, error) { return n, err } +// copier allows us to keep track of diffID values for blobs, and other +// data shared across one or more images in a possible manifest list. +type copier struct { + copiedBlobs map[digest.Digest]digest.Digest + cachedDiffIDs map[digest.Digest]digest.Digest + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties +} + +// imageCopier tracks state specific to a single image (possibly an item of a manifest list) +type imageCopier struct { + c *copier + manifestUpdates *types.ManifestUpdateOptions + src types.Image + diffIDsAreNeeded bool + canModifyManifest bool +} + // Options allows supplying non-default configuration modifying the behavior of CopyImage. type Options struct { RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. @@ -95,6 +98,8 @@ type Options struct { DestinationCtx *types.SystemContext ProgressInterval time.Duration // time to wait between reports to signal the progress channel Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. + // manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type + ForceManifestMIMEType string } // Image copies image from srcRef to destRef, using policyContext to validate @@ -115,10 +120,6 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe reportWriter = options.ReportWriter } - writeReport := func(f string, a ...interface{}) { - fmt.Fprintf(reportWriter, f, a...) 
- } - dest, err := destRef.NewImageDestination(options.DestinationCtx) if err != nil { return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef)) @@ -133,43 +134,89 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe if err != nil { return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef)) } - unparsedImage := image.UnparsedFromSource(rawSource) defer func() { - if unparsedImage != nil { - if err := unparsedImage.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (unparsed: %v)", err) - } + if err := rawSource.Close(); err != nil { + retErr = errors.Wrapf(retErr, " (src: %v)", err) } }() + c := &copier{ + copiedBlobs: make(map[digest.Digest]digest.Digest), + cachedDiffIDs: make(map[digest.Digest]digest.Digest), + dest: dest, + rawSource: rawSource, + reportWriter: reportWriter, + progressInterval: options.ProgressInterval, + progress: options.Progress, + } + + unparsedToplevel := image.UnparsedInstance(rawSource, nil) + multiImage, err := isMultiImage(unparsedToplevel) + if err != nil { + return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef)) + } + + if !multiImage { + // The simple case: Just copy a single image. + if err := c.copyOneImage(policyContext, options, unparsedToplevel); err != nil { + return err + } + } else { + // This is a manifest list. Choose a single image and copy it. + // FIXME: Copy to destinations which support manifest lists, one image at a time. + instanceDigest, err := image.ChooseManifestInstanceFromManifestList(options.SourceCtx, unparsedToplevel) + if err != nil { + return errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef)) + } + logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest) + unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) + + if err := c.copyOneImage(policyContext, options, unparsedInstance); err != nil { + return err + } + } + + if err := c.dest.Commit(); err != nil { + return errors.Wrap(err, "Error committing the finished image") + } + + return nil +} + +// Image copies a single (on-manifest-list) image unparsedImage, using policyContext to validate +// source image admissibility. +func (c *copier) copyOneImage(policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (retErr error) { + // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. + // Make sure we fail cleanly in such cases. + multiImage, err := isMultiImage(unparsedImage) + if err != nil { + // FIXME FIXME: How to name a reference for the sub-image? + return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference())) + } + if multiImage { + return fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + } + // Please keep this policy check BEFORE reading any other information about the image. + // (the multiImage check above only matches the MIME type, which we have received anyway. + // Actual parsing of anything should be deferred.) if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. 
return errors.Wrap(err, "Source image rejected") } - src, err := image.FromUnparsedImage(unparsedImage) + src, err := image.FromUnparsedImage(options.SourceCtx, unparsedImage) if err != nil { - return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef)) + return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference())) } - unparsedImage = nil - defer func() { - if err := src.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (source: %v)", err) - } - }() - if err := checkImageDestinationForCurrentRuntimeOS(src, dest); err != nil { + if err := checkImageDestinationForCurrentRuntimeOS(options.DestinationCtx, src, c.dest); err != nil { return err } - if src.IsMultiImage() { - return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef)) - } - var sigs [][]byte if options.RemoveSignatures { sigs = [][]byte{} } else { - writeReport("Getting image source signatures\n") + c.Printf("Getting image source signatures\n") s, err := src.Signatures(context.TODO()) if err != nil { return errors.Wrap(err, "Error reading signatures") @@ -177,41 +224,33 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe sigs = s } if len(sigs) != 0 { - writeReport("Checking if image destination supports signatures\n") - if err := dest.SupportsSignatures(); err != nil { + c.Printf("Checking if image destination supports signatures\n") + if err := c.dest.SupportsSignatures(); err != nil { return errors.Wrap(err, "Can not copy signatures") } } - canModifyManifest := len(sigs) == 0 - manifestUpdates := types.ManifestUpdateOptions{} - manifestUpdates.InformationOnly.Destination = dest + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // diffIDsAreNeeded is computed later + canModifyManifest: len(sigs) == 0, + } - if err := updateEmbeddedDockerReference(&manifestUpdates, dest, src, canModifyManifest); err != nil { + if err := ic.updateEmbeddedDockerReference(); err != nil { return err } // We compute preferredManifestMIMEType only to show it in error messages. // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. - preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, dest.SupportedManifestMIMETypes(), canModifyManifest) + preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType) if err != nil { return err } - // If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here. - ic := imageCopier{ - copiedBlobs: make(map[digest.Digest]digest.Digest), - cachedDiffIDs: make(map[digest.Digest]digest.Digest), - manifestUpdates: &manifestUpdates, - dest: dest, - src: src, - rawSource: rawSource, - diffIDsAreNeeded: src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates), - canModifyManifest: canModifyManifest, - reportWriter: reportWriter, - progressInterval: options.ProgressInterval, - progress: options.Progress, - } + // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. 
+ ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) if err := ic.copyLayers(); err != nil { return err @@ -233,9 +272,9 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe } // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. // So if we are here, we will definitely be trying to convert the manifest. - // With !canModifyManifest, that would just be a string of repeated failures for the same reason, + // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. - if !canModifyManifest { + if !ic.canModifyManifest { return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") } @@ -243,7 +282,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} for _, manifestMIMEType := range otherManifestMIMETypeCandidates { logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) - manifestUpdates.ManifestMIMEType = manifestMIMEType + ic.manifestUpdates.ManifestMIMEType = manifestMIMEType attemptedManifest, err := ic.copyUpdatedConfigAndManifest() if err != nil { logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) @@ -262,35 +301,44 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe } if options.SignBy != "" { - newSig, err := createSignature(dest, manifest, options.SignBy, reportWriter) + newSig, err := c.createSignature(manifest, options.SignBy) if err != nil { return err } sigs = append(sigs, newSig) } - writeReport("Storing signatures\n") - if err := dest.PutSignatures(sigs); err != nil { + c.Printf("Storing signatures\n") + if err := c.dest.PutSignatures(sigs); err != nil { return errors.Wrap(err, "Error writing signatures") } - if err := dest.Commit(); err != nil { - return errors.Wrap(err, "Error committing the finished image") - } - return nil } -func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageDestination) error { +// Printf writes a formatted string to c.reportWriter. +// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...interface{}) { + fmt.Fprintf(c.reportWriter, format, a...) 
+} + +func checkImageDestinationForCurrentRuntimeOS(ctx *types.SystemContext, src types.Image, dest types.ImageDestination) error { if dest.MustMatchRuntimeOS() { + wantedOS := runtime.GOOS + if ctx != nil && ctx.OSChoice != "" { + wantedOS = ctx.OSChoice + } c, err := src.OCIConfig() if err != nil { return errors.Wrapf(err, "Error parsing image configuration") } - osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, runtime.GOOS) - if runtime.GOOS == "windows" && c.OS == "linux" { + osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS) + if wantedOS == "windows" && c.OS == "linux" { return osErr - } else if runtime.GOOS != "windows" && c.OS == "windows" { + } else if wantedOS != "windows" && c.OS == "windows" { return osErr } } @@ -298,35 +346,44 @@ func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageD } // updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. -func updateEmbeddedDockerReference(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, canModifyManifest bool) error { - destRef := dest.Reference().DockerReference() +func (ic *imageCopier) updateEmbeddedDockerReference() error { + destRef := ic.c.dest.Reference().DockerReference() if destRef == nil { return nil // Destination does not care about Docker references } - if !src.EmbeddedDockerReferenceConflicts(destRef) { + if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { return nil // No reference embedded in the manifest, or it matches destRef already. } - if !canModifyManifest { + if !ic.canModifyManifest { return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway", - transports.ImageName(dest.Reference()), destRef.String()) + transports.ImageName(ic.c.dest.Reference()), destRef.String()) } - manifestUpdates.EmbeddedDockerReference = destRef + ic.manifestUpdates.EmbeddedDockerReference = destRef return nil } -// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. func (ic *imageCopier) copyLayers() error { srcInfos := ic.src.LayerInfos() destInfos := []types.BlobInfo{} diffIDs := []digest.Digest{} + updatedSrcInfos := ic.src.LayerInfosForCopy() + srcInfosUpdated := false + if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { + if !ic.canModifyManifest { + return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden") + } + srcInfos = updatedSrcInfos + srcInfosUpdated = true + } for _, srcLayer := range srcInfos { var ( destInfo types.BlobInfo diffID digest.Digest err error ) - if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { // DiffIDs are, currently, needed only when converting from schema1. // In which case src.LayerInfos will not have URLs because schema1 // does not support them. 
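The destination-OS check added above now consults SystemContext.OSChoice before falling back to the build-time runtime.GOOS, which is what lets a caller deliberately copy a linux image while running on Windows (or vice versa). A self-contained sketch of just that precedence rule; systemContext here is a local stand-in for the single field of types.SystemContext involved:

```go
package main

import (
	"fmt"
	"runtime"
)

// systemContext stands in for the one field of types.SystemContext that
// the OS check consults.
type systemContext struct {
	OSChoice string
}

// wantedOS mirrors the precedence in checkImageDestinationForCurrentRuntimeOS:
// an explicit OSChoice wins, otherwise the build-time runtime.GOOS is used.
func wantedOS(ctx *systemContext) string {
	if ctx != nil && ctx.OSChoice != "" {
		return ctx.OSChoice
	}
	return runtime.GOOS
}

func main() {
	fmt.Println(wantedOS(nil))                                 // build-time OS, e.g. "linux"
	fmt.Println(wantedOS(&systemContext{OSChoice: "windows"})) // explicit override
}
```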
@@ -334,7 +391,7 @@ func (ic *imageCopier) copyLayers() error { return errors.New("getting DiffID for foreign layers is unimplemented") } destInfo = srcLayer - fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name()) + ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name()) } else { destInfo, diffID, err = ic.copyLayer(srcLayer) if err != nil { @@ -348,7 +405,7 @@ func (ic *imageCopier) copyLayers() error { if ic.diffIDsAreNeeded { ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs } - if layerDigestsDiffer(srcInfos, destInfos) { + if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { ic.manifestUpdates.LayerInfos = destInfos } return nil @@ -379,7 +436,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) { // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. // So, this can only happen if we are trying to upload using one of the other MIME type candidates. // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise - // when ic.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. + // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) @@ -395,27 +452,27 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) { return nil, errors.Wrap(err, "Error reading manifest") } - if err := ic.copyConfig(pendingImage); err != nil { + if err := ic.c.copyConfig(pendingImage); err != nil { return nil, err } - fmt.Fprintf(ic.reportWriter, "Writing manifest to image destination\n") - if err := ic.dest.PutManifest(manifest); err != nil { + ic.c.Printf("Writing manifest to image destination\n") + if err := ic.c.dest.PutManifest(manifest); err != nil { return nil, errors.Wrap(err, "Error writing manifest") } return manifest, nil } // copyConfig copies config.json, if any, from src to dest. 
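copyLayers now has two independent reasons to rewrite the manifest's layer list: the source substituted layers up front (LayerInfosForCopy, allowed only when ic.canModifyManifest) or some digest changed during upload. A stand-alone sketch of that decision, with a local blobInfo type standing in for types.BlobInfo:

```go
package main

import "fmt"

// blobInfo is a local stand-in for the Digest/Size fields of types.BlobInfo.
type blobInfo struct {
	Digest string
	Size   int64
}

// layerDigestsDiffer mirrors the helper used above: true if any layer's
// digest changed between the source list and what was actually uploaded.
func layerDigestsDiffer(a, b []blobInfo) bool {
	if len(a) != len(b) {
		return true
	}
	for i := range a {
		if a[i].Digest != b[i].Digest {
			return true
		}
	}
	return false
}

func main() {
	src := []blobInfo{{Digest: "sha256:aaa", Size: 100}}
	dest := []blobInfo{{Digest: "sha256:bbb", Size: 90}} // e.g. recompressed in transit
	srcInfosUpdated := false                             // true when LayerInfosForCopy substituted layers

	if srcInfosUpdated || layerDigestsDiffer(src, dest) {
		fmt.Println("manifest needs updated LayerInfos")
	}
}
```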
-func (ic *imageCopier) copyConfig(src types.Image) error { +func (c *copier) copyConfig(src types.Image) error { srcInfo := src.ConfigInfo() if srcInfo.Digest != "" { - fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest) + c.Printf("Copying config %s\n", srcInfo.Digest) configBlob, err := src.ConfigBlob() if err != nil { return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest) } - destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false) + destInfo, err := c.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false) if err != nil { return err } @@ -437,12 +494,12 @@ type diffIDResult struct { // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) { // Check if we already have a blob with this digest - haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo) + haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(srcInfo) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest) } // If we already have a cached diffID for this blob, we don't need to compute it - diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "") + diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "") // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again if haveBlob && !diffIDIsNeeded { // Check the blob sizes match, if we were given a size this time @@ -451,17 +508,17 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest } srcInfo.Size = extantBlobSize // Tell the image destination that this blob's delta is being applied again. 
For some image destinations, this can be faster than using GetBlob/PutBlob - blobinfo, err := ic.dest.ReapplyBlob(srcInfo) + blobinfo, err := ic.c.dest.ReapplyBlob(srcInfo) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest) } - fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest) - return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err + ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest) + return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err } // Fallback: copy the layer, computing the diffID if we need to do so - fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest) - srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo) + ic.c.Printf("Copying blob %s\n", srcInfo.Digest) + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(srcInfo) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) } @@ -479,7 +536,7 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID") } logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest + ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest } return blobInfo, diffIDResult.digest, nil } @@ -513,7 +570,7 @@ func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.Bl return pipeWriter } } - blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success + blobInfo, err := ic.c.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -547,7 +604,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) // perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, // perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied blob. -func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo, +func (c *copier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, canCompress bool) (types.BlobInfo, error) { // The copying happens through a pipeline of connected io.Readers. @@ -575,7 +632,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo // === Report progress using a pb.Reader. 
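The DiffID plumbing above exists because schema1-to-schema2 conversion needs digests of the uncompressed layers while the blobs travel compressed. A minimal sketch of the underlying computation, assuming a gzip-compressed layer on disk (the real computeDiffID is handed whatever decompressor was detected on the stream):

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"

	"github.com/opencontainers/go-digest"
)

// computeGzipDiffID digests the *uncompressed* payload of a gzip stream,
// which is what a schema2 DiffID is for a gzipped layer.
func computeGzipDiffID(compressed io.Reader) (digest.Digest, error) {
	uncompressed, err := gzip.NewReader(compressed)
	if err != nil {
		return "", err
	}
	defer uncompressed.Close()
	return digest.Canonical.FromReader(uncompressed)
}

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical layer blob
	if err != nil {
		panic(err)
	}
	defer f.Close()

	diffID, err := computeGzipDiffID(f)
	if err != nil {
		panic(err)
	}
	fmt.Println("DiffID:", diffID)
}
```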
bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES) - bar.Output = ic.reportWriter + bar.Output = c.reportWriter bar.SetMaxWidth(80) bar.ShowTimeLeft = false bar.ShowPercent = false @@ -592,7 +649,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo // === Compress the layer if it is uncompressed and compression is desired var inputInfo types.BlobInfo - if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() { + if !canCompress || isCompressed || !c.dest.ShouldCompressLayers() { logrus.Debugf("Using original blob without modification") inputInfo = srcInfo } else { @@ -609,19 +666,19 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo inputInfo.Size = -1 } - // === Report progress using the ic.progress channel, if required. - if ic.progress != nil && ic.progressInterval > 0 { + // === Report progress using the c.progress channel, if required. + if c.progress != nil && c.progressInterval > 0 { destStream = &progressReader{ source: destStream, - channel: ic.progress, - interval: ic.progressInterval, + channel: c.progress, + interval: c.progressInterval, artifact: srcInfo, lastTime: time.Now(), } } // === Finally, send the layer stream to dest. - uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo) + uploadedInfo, err := c.dest.PutBlob(destStream, inputInfo) if err != nil { return types.BlobInfo{}, errors.Wrap(err, "Error writing blob") } diff --git a/vendor/github.com/containers/image/copy/manifest.go b/vendor/github.com/containers/image/copy/manifest.go index e3b294dd..7e4cd10e 100644 --- a/vendor/github.com/containers/image/copy/manifest.go +++ b/vendor/github.com/containers/image/copy/manifest.go @@ -37,16 +37,20 @@ func (os *orderedSet) append(s string) { } } -// determineManifestConversion updates manifestUpdates to convert manifest to a supported MIME type, if necessary and canModifyManifest. -// Note that the conversion will only happen later, through src.UpdatedImage +// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. +// Note that the conversion will only happen later, through ic.src.UpdatedImage // Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), // and a list of other possible alternatives, in order. -func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, src types.Image, destSupportedManifestMIMETypes []string, canModifyManifest bool) (string, []string, error) { - _, srcType, err := src.Manifest() +func (ic *imageCopier) determineManifestConversion(destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) { + _, srcType, err := ic.src.Manifest() if err != nil { // This should have been cached?! return "", nil, errors.Wrap(err, "Error reading manifest") } + if forceManifestMIMEType != "" { + destSupportedManifestMIMETypes = []string{forceManifestMIMEType} + } + if len(destSupportedManifestMIMETypes) == 0 { return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. 
} @@ -67,10 +71,10 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s if _, ok := supportedByDest[srcType]; ok { prioritizedTypes.append(srcType) } - if !canModifyManifest { - // We could also drop the !canModifyManifest parameter and have the caller + if !ic.canModifyManifest { + // We could also drop the !ic.canModifyManifest check and have the caller // make the choice; it is already doing that to an extent, to improve error - // messages. But it is nice to hide the “if !canModifyManifest, do no conversion” + // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” // special case in here; the caller can then worry (or not) only about a good UI. logrus.Debugf("We can't modify the manifest, hoping for the best...") return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? @@ -94,9 +98,18 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s } preferredType := prioritizedTypes.list[0] if preferredType != srcType { - manifestUpdates.ManifestMIMEType = preferredType + ic.manifestUpdates.ManifestMIMEType = preferredType } else { logrus.Debugf("... will first try using the original manifest unmodified") } return preferredType, prioritizedTypes.list[1:], nil } + +// isMultiImage returns true if img is a list of images +func isMultiImage(img types.UnparsedImage) (bool, error) { + _, mt, err := img.Manifest() + if err != nil { + return false, err + } + return manifest.MIMETypeIsMultiImage(mt), nil +} diff --git a/vendor/github.com/containers/image/copy/sign.go b/vendor/github.com/containers/image/copy/sign.go index 9187d70b..91394d2b 100644 --- a/vendor/github.com/containers/image/copy/sign.go +++ b/vendor/github.com/containers/image/copy/sign.go @@ -1,17 +1,13 @@ package copy import ( - "fmt" - "io" - "github.com/containers/image/signature" "github.com/containers/image/transports" - "github.com/containers/image/types" "github.com/pkg/errors" ) -// createSignature creates a new signature of manifest at (identified by) dest using keyIdentity. -func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity string, reportWriter io.Writer) ([]byte, error) { +// createSignature creates a new signature of manifest using keyIdentity. 
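isMultiImage, defined above, reduces the manifest-list question to a MIME-type test, which is why it is cheap enough to run before the policy check. A runnable demonstration; the constant names are taken from the manifest package used throughout this diff, with DockerV2ListMediaType assumed to be the schema2 manifest-list constant:

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	// A plain schema2 manifest versus a schema2 manifest list; only the
	// latter should report true.
	for _, mt := range []string{
		manifest.DockerV2Schema2MediaType,
		manifest.DockerV2ListMediaType,
	} {
		fmt.Printf("%s -> multi-image: %v\n", mt, manifest.MIMETypeIsMultiImage(mt))
	}
}
```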
+func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { mech, err := signature.NewGPGSigningMechanism() if err != nil { return nil, errors.Wrap(err, "Error initializing GPG") @@ -21,12 +17,12 @@ func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity s return nil, errors.Wrap(err, "Signing not supported") } - dockerReference := dest.Reference().DockerReference() + dockerReference := c.dest.Reference().DockerReference() if dockerReference == nil { - return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference())) + return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) } - fmt.Fprintf(reportWriter, "Signing manifest\n") + c.Printf("Signing manifest\n") newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) if err != nil { return nil, errors.Wrap(err, "Error creating signature") diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go index ea46a27e..47d59d9f 100644 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ b/vendor/github.com/containers/image/directory/directory_dest.go @@ -4,19 +4,77 @@ import ( "io" "io/ioutil" "os" + "path/filepath" "github.com/containers/image/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) +const version = "Directory Transport Version: 1.0\n" + +// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created +// using the 'dir' transport +var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") + type dirImageDestination struct { - ref dirReference + ref dirReference + compress bool } -// newImageDestination returns an ImageDestination for writing to an existing directory. -func newImageDestination(ref dirReference) types.ImageDestination { - return &dirImageDestination{ref} +// newImageDestination returns an ImageDestination for writing to a directory. 
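Moving createSignature onto *copier only changes where the destination reference and report writer come from; the GPG flow itself is unchanged. A hypothetical stand-alone sketch of that flow (the manifest file, Docker reference, and key fingerprint are placeholders):

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/signature"
)

func main() {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		panic(err)
	}
	defer mech.Close()
	if err := mech.SupportsSigning(); err != nil {
		panic(err) // e.g. built without GPG signing support
	}

	manifestBlob, err := ioutil.ReadFile("manifest.json") // hypothetical input
	if err != nil {
		panic(err)
	}

	// The Docker reference must be the destination's canonical reference
	// (createSignature derives it from c.dest); both strings here are
	// placeholders.
	sig, err := signature.SignDockerManifest(manifestBlob, "docker.io/library/busybox:latest", mech, "<GPG key fingerprint>")
	if err != nil {
		panic(err)
	}
	fmt.Printf("created %d-byte signature\n", len(sig))
}
```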
+func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) { + d := &dirImageDestination{ref: ref, compress: compress} + + // If the directory exists, check whether it is empty. + // If it is not empty, verify that its contents match those of a container image directory and overwrite them. + // If the contents do not match, fail with an error. + dirExists, err := pathExists(d.ref.resolvedPath) + if err != nil { + return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath) + } + if dirExists { + isEmpty, err := isDirEmpty(d.ref.resolvedPath) + if err != nil { + return nil, err + } + + if !isEmpty { + versionExists, err := pathExists(d.ref.versionPath()) + if err != nil { + return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath()) + } + if versionExists { + contents, err := ioutil.ReadFile(d.ref.versionPath()) + if err != nil { + return nil, err + } + // check that the contents of the version file are what we expect + if string(contents) != version { + return nil, ErrNotContainerImageDir + } + } else { + return nil, ErrNotContainerImageDir + } + // delete directory contents so that only one image is in the directory at a time + if err = removeDirContents(d.ref.resolvedPath); err != nil { + return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath) + } + logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath) + } + } else { + // create directory if it doesn't exist + if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil { + return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath) + } + } + // create version file + err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0755) + if err != nil { + return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath()) + } + return d, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, @@ -42,7 +100,7 @@ func (d *dirImageDestination) SupportsSignatures() error { // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
func (d *dirImageDestination) ShouldCompressLayers() bool { - return false + return d.compress } // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually @@ -147,3 +205,39 @@ func (d *dirImageDestination) PutSignatures(signatures [][]byte) error { func (d *dirImageDestination) Commit() error { return nil } + +// returns true if path exists +func pathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if err != nil && os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// returns true if directory is empty +func isDirEmpty(path string) (bool, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + return false, err + } + return len(files) == 0, nil +} + +// deletes the contents of a directory +func removeDirContents(path string) error { + files, err := ioutil.ReadDir(path) + if err != nil { + return err + } + + for _, file := range files { + if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go index fddc1c52..0a8acf6b 100644 --- a/vendor/github.com/containers/image/directory/directory_src.go +++ b/vendor/github.com/containers/image/directory/directory_src.go @@ -35,7 +35,12 @@ func (s *dirImageSource) Close() error { // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *dirImageSource) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *dirImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) + } m, err := ioutil.ReadFile(s.ref.manifestPath()) if err != nil { return nil, "", err @@ -43,10 +48,6 @@ func (s *dirImageSource) GetManifest() ([]byte, string, error) { return m, manifest.GuessMIMEType(m), err } -func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) -} - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { r, err := os.Open(s.ref.layerPath(info.Digest)) @@ -60,7 +61,14 @@ func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err return r, fi.Size(), nil } -func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). 
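Together, pathExists, isDirEmpty, and the version-file comparison implement a simple safety contract: a directory is reused only if it is missing, empty, or provably a previous "dir:" image. A condensed, stand-alone approximation of that guard, with error handling simplified relative to the real newImageDestination:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

const version = "Directory Transport Version: 1.0\n"

// okToReuse approximates the guard in newImageDestination: a directory may
// be (re)used only if it is missing, empty, or carries the expected
// version file from a previous "dir:" write.
func okToReuse(dir string) (bool, error) {
	entries, err := ioutil.ReadDir(dir)
	if os.IsNotExist(err) {
		return true, nil // will be created
	}
	if err != nil {
		return false, err
	}
	if len(entries) == 0 {
		return true, nil
	}
	contents, err := ioutil.ReadFile(filepath.Join(dir, "version"))
	if err != nil {
		return false, nil // no readable version file: refuse to overwrite
	}
	return string(contents) == version, nil
}

func main() {
	ok, err := okToReuse("/tmp/busybox") // hypothetical path
	fmt.Println(ok, err)
}
```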
+func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.Errorf(`Manifest lists are not supported by "dir:"`) + } signatures := [][]byte{} for i := 0; ; i++ { signature, err := ioutil.ReadFile(s.ref.signaturePath(i)) @@ -74,3 +82,8 @@ func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { } return signatures, nil } + +// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified. +func (s *dirImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/directory/directory_transport.go b/vendor/github.com/containers/image/directory/directory_transport.go index b9ce01a2..c3875308 100644 --- a/vendor/github.com/containers/image/directory/directory_transport.go +++ b/vendor/github.com/containers/image/directory/directory_transport.go @@ -134,13 +134,14 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string { return res } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref dirReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dirReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src := newImageSource(ref) - return image.FromSource(src) + return image.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. @@ -152,7 +153,11 @@ func (ref dirReference) NewImageSource(ctx *types.SystemContext) (types.ImageSou // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ref), nil + compress := false + if ctx != nil { + compress = ctx.DirForceCompress + } + return newImageDestination(ref, compress) } // DeleteImage deletes the named image from the registry, if supported. @@ -175,3 +180,8 @@ func (ref dirReference) layerPath(digest digest.Digest) string { return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)) } + +// versionPath returns a path for the version file within a directory using our conventions.
+func (ref dirReference) versionPath() string { + return filepath.Join(ref.path, "version") +} diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go index aebcaa82..b2ffd965 100644 --- a/vendor/github.com/containers/image/docker/archive/src.go +++ b/vendor/github.com/containers/image/docker/archive/src.go @@ -34,3 +34,8 @@ func (s *archiveImageSource) Reference() types.ImageReference { return s.ref } func (s *archiveImageSource) Close() error { return nil } + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *archiveImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/docker/archive/transport.go b/vendor/github.com/containers/image/docker/archive/transport.go index f38d4ace..047df73d 100644 --- a/vendor/github.com/containers/image/docker/archive/transport.go +++ b/vendor/github.com/containers/image/docker/archive/transport.go @@ -125,13 +125,14 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string { return []string{} } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src := newImageSource(ctx, ref) - return ctrImage.FromSource(src) + return ctrImage.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. diff --git a/vendor/github.com/containers/image/docker/daemon/client.go b/vendor/github.com/containers/image/docker/daemon/client.go new file mode 100644 index 00000000..82fab4b1 --- /dev/null +++ b/vendor/github.com/containers/image/docker/daemon/client.go @@ -0,0 +1,69 @@ +package daemon + +import ( + "net/http" + "path/filepath" + + "github.com/containers/image/types" + dockerclient "github.com/docker/docker/client" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + // The default API version to be used in case none is explicitly specified + defaultAPIVersion = "1.22" +) + +// newDockerClient initializes a new API client based on the passed SystemContext. +func newDockerClient(ctx *types.SystemContext) (*dockerclient.Client, error) { + host := dockerclient.DefaultDockerHost + if ctx != nil && ctx.DockerDaemonHost != "" { + host = ctx.DockerDaemonHost + } + + // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient. + // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s + // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket + // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. + // + // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client.
+ proto, _, _, err := dockerclient.ParseHost(host) + if err != nil { + return nil, err + } + var httpClient *http.Client + if proto != "unix" { + hc, err := tlsConfig(ctx) + if err != nil { + return nil, err + } + httpClient = hc + } + + return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) +} + +func tlsConfig(ctx *types.SystemContext) (*http.Client, error) { + options := tlsconfig.Options{} + if ctx != nil && ctx.DockerDaemonInsecureSkipTLSVerify { + options.InsecureSkipVerify = true + } + + if ctx != nil && ctx.DockerDaemonCertPath != "" { + options.CAFile = filepath.Join(ctx.DockerDaemonCertPath, "ca.pem") + options.CertFile = filepath.Join(ctx.DockerDaemonCertPath, "cert.pem") + options.KeyFile = filepath.Join(ctx.DockerDaemonCertPath, "key.pem") + } + + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + CheckRedirect: dockerclient.CheckRedirect, + }, nil +} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go index 559e5c71..f73ac233 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go @@ -14,6 +14,7 @@ import ( type daemonImageDestination struct { ref daemonReference + mustMatchRuntimeOS bool *tarfile.Destination // Implements most of types.ImageDestination // For talking to imageLoadGoroutine goroutineCancel context.CancelFunc @@ -24,7 +25,7 @@ type daemonImageDestination struct { } // newImageDestination returns a types.ImageDestination for the specified image reference. -func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { +func newImageDestination(ctx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { if ref.ref == nil { return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) } @@ -33,7 +34,12 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) } - c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host + var mustMatchRuntimeOS = true + if ctx != nil && ctx.DockerDaemonHost != client.DefaultDockerHost { + mustMatchRuntimeOS = false + } + + c, err := newDockerClient(ctx) if err != nil { return nil, errors.Wrap(err, "Error initializing docker engine client") } @@ -42,16 +48,17 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. 
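newDockerClient makes the daemon endpoint configurable for the first time, and everything downstream (including the MustMatchRuntimeOS relaxation below) hinges on which host string wins. A small sketch of that selection; systemContext is a stand-in for the DockerDaemonHost field used above:

```go
package main

import (
	"fmt"

	dockerclient "github.com/docker/docker/client"
)

// systemContext stands in for the DockerDaemonHost field of types.SystemContext.
type systemContext struct {
	DockerDaemonHost string
}

// daemonHost mirrors the host selection in newDockerClient: an explicit
// DockerDaemonHost wins, otherwise the client library default is used.
func daemonHost(ctx *systemContext) string {
	if ctx != nil && ctx.DockerDaemonHost != "" {
		return ctx.DockerDaemonHost
	}
	return dockerclient.DefaultDockerHost
}

func main() {
	fmt.Println(daemonHost(nil)) // e.g. unix:///var/run/docker.sock
	fmt.Println(daemonHost(&systemContext{DockerDaemonHost: "tcp://10.0.0.5:2376"}))
}
```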
statusChannel := make(chan error, 1) - ctx, goroutineCancel := context.WithCancel(context.Background()) - go imageLoadGoroutine(ctx, c, reader, statusChannel) + goroutineContext, goroutineCancel := context.WithCancel(context.Background()) + go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) return &daemonImageDestination{ - ref: ref, - Destination: tarfile.NewDestination(writer, namedTaggedRef), - goroutineCancel: goroutineCancel, - statusChannel: statusChannel, - writer: writer, - committed: false, + ref: ref, + mustMatchRuntimeOS: mustMatchRuntimeOS, + Destination: tarfile.NewDestination(writer, namedTaggedRef), + goroutineCancel: goroutineCancel, + statusChannel: statusChannel, + writer: writer, + committed: false, }, nil } @@ -80,7 +87,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. func (d *daemonImageDestination) MustMatchRuntimeOS() bool { - return true + return d.mustMatchRuntimeOS } // Close removes resources associated with an initialized ImageDestination, if any. diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go index 644dbeec..5cf7679b 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_src.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_src.go @@ -6,14 +6,12 @@ import ( "os" "github.com/containers/image/docker/tarfile" + "github.com/containers/image/internal/tmpdir" "github.com/containers/image/types" - "github.com/docker/docker/client" "github.com/pkg/errors" "golang.org/x/net/context" ) -const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. - type daemonImageSource struct { ref daemonReference *tarfile.Source // Implements most of types.ImageSource @@ -35,7 +33,7 @@ type layerInfo struct { // is the config, and that the following len(RootFS) files are the layers, but that feels // way too brittle.) func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) { - c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host + c, err := newDockerClient(ctx) if err != nil { return nil, errors.Wrap(err, "Error initializing docker engine client") } @@ -48,7 +46,7 @@ func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageS defer inputStream.Close() // FIXME: use SystemContext here. - tarCopyFile, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-tar") + tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-daemon-tar") if err != nil { return nil, err } @@ -83,3 +81,8 @@ func (s *daemonImageSource) Reference() types.ImageReference { func (s *daemonImageSource) Close() error { return os.Remove(s.tarCopyPath) } + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
+func (s *daemonImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go index 41be1b2d..8ad6b521 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go @@ -151,14 +151,17 @@ func (ref daemonReference) PolicyConfigurationNamespaces() []string { return []string{} } -// NewImage returns a types.Image for this reference. -// The caller must call .Close() on the returned Image. -func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(ctx, ref) if err != nil { return nil, err } - return image.FromSource(src) + return image.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go index 24b82d6f..217e9dcb 100644 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ b/vendor/github.com/containers/image/docker/docker_client.go @@ -8,7 +8,6 @@ import ( "io" "io/ioutil" "net/http" - "os" "path/filepath" "strings" "time" @@ -125,69 +124,6 @@ func dockerCertDir(ctx *types.SystemContext, hostPort string) string { return filepath.Join(hostCertDir, hostPort) } -func setupCertificates(dir string, tlsc *tls.Config) error { - logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - if os.IsPermission(err) { - logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) - return nil - } - return err - } - - for _, f := range fs { - fullPath := filepath.Join(dir, f.Name()) - if strings.HasSuffix(f.Name(), ".crt") { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return errors.Wrap(err, "unable to get system cert pool") - } - tlsc.RootCAs = systemPool - logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) - if err != nil { - return err - } - tlsc.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf(" cert: %s", fullPath) - if !hasFile(fs, keyName) { - return errors.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) - if err != nil { - return err - } - tlsc.Certificates = append(tlsc.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf(" key: %s", fullPath) - if !hasFile(fs, certName) { - return errors.Errorf("missing client certificate %s for key %s", certName, keyName) - } - } - } - return nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) func newDockerClientFromRef(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go index 8be35b73..2148ed8b 100644 --- a/vendor/github.com/containers/image/docker/docker_image.go +++ b/vendor/github.com/containers/image/docker/docker_image.go @@ -12,26 +12,26 @@ import ( "github.com/pkg/errors" ) -// Image is a Docker-specific implementation of types.Image with a few extra methods +// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods // which are specific to Docker. type Image struct { - types.Image + types.ImageCloser src *dockerImageSource } // newImage returns a new Image interface type after setting up // a client to the registry hosting the given image. // The caller must call .Close() on the returned Image. -func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error) { +func newImage(ctx *types.SystemContext, ref dockerReference) (types.ImageCloser, error) { s, err := newImageSource(ctx, ref) if err != nil { return nil, err } - img, err := image.FromSource(s) + img, err := image.FromSource(ctx, s) if err != nil { return nil, err } - return &Image{Image: img, src: s}, nil + return &Image{ImageCloser: img, src: s}, nil } // SourceRefFullName returns a fully expanded name for the repository this image is in. diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go index 32d5a18b..79c38622 100644 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/docker/docker_image_dest.go @@ -236,7 +236,7 @@ func (d *dockerImageDestination) PutManifest(m []byte) error { return err } defer res.Body.Close() - if res.StatusCode != http.StatusCreated { + if !successStatus(res.StatusCode) { err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest to %s", path) if isManifestInvalidError(errors.Cause(err)) { err = types.ManifestTypeRejectedError{Err: err} @@ -246,6 +246,12 @@ func (d *dockerImageDestination) PutManifest(m []byte) error { return nil } +// successStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive).
+func successStatus(status int) bool { + return status >= 200 && status <= 399 +} + // isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error. func isManifestInvalidError(err error) bool { errors, ok := err.(errcode.Errors) diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go index 232c3cf9..63bfe8aa 100644 --- a/vendor/github.com/containers/image/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/docker/docker_image_src.go @@ -52,6 +52,11 @@ func (s *dockerImageSource) Close() error { return nil } +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *dockerImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} + // simplifyContentType drops parameters from an HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1) // Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string. func simplifyContentType(contentType string) string { @@ -67,7 +72,12 @@ func simplifyContentType(contentType string) string { // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *dockerImageSource) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *dockerImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return s.fetchManifest(context.TODO(), instanceDigest.String()) + } err := s.ensureManifestIsLoaded(context.TODO()) if err != nil { return nil, "", err @@ -94,18 +104,12 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil } -// GetTargetManifest returns an image's manifest given a digest. -// This is mainly used to retrieve a single image's manifest out of a manifest list. -func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return s.fetchManifest(context.TODO(), digest.String()) -} - // ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType // // ImageSource implementations are not required or expected to do any caching, // but because our signatures are “attached” to the manifest digest, -// we need to ensure that the digest of the manifest returned by GetManifest -// and used by GetSignatures are consistent, otherwise we would get spurious +// we need to ensure that the digest of the manifest returned by GetManifest(nil) +// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious // signature verification failures when pulling while a tag is being updated.
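Replacing GetTargetManifest(digest) with GetManifest(instanceDigest) gives every ImageSource one uniform entry point: nil selects the primary manifest, and a non-nil digest selects one instance of a manifest list. A hypothetical usage sketch against a "dir:" source (the path is a placeholder; "dir:" never returns lists, so nil is the only useful argument there):

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	"github.com/containers/image/transports/alltransports"
)

func main() {
	ref, err := alltransports.ParseImageName("dir:/tmp/busybox") // hypothetical path
	if err != nil {
		panic(err)
	}
	src, err := ref.NewImageSource(nil)
	if err != nil {
		panic(err)
	}
	defer src.Close()

	// nil selects the primary manifest; a non-nil digest would select one
	// instance of a manifest list (which "dir:" rejects).
	blob, mimeType, err := src.GetManifest(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes, MIME %q, list: %v\n", len(blob), mimeType, manifest.MIMETypeIsMultiImage(mimeType))
}
```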
func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { if s.cachedManifest != nil { @@ -176,22 +180,30 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, return res.Body, getBlobSize(res), nil } -func (s *dockerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { if err := s.c.detectProperties(ctx); err != nil { return nil, err } switch { case s.c.signatureBase != nil: - return s.getSignaturesFromLookaside(ctx) + return s.getSignaturesFromLookaside(ctx, instanceDigest) case s.c.supportsSignatures: - return s.getSignaturesFromAPIExtension(ctx) + return s.getSignaturesFromAPIExtension(ctx, instanceDigest) default: return [][]byte{}, nil } } -// manifestDigest returns a digest of the manifest, either from the supplied reference or from a fetched manifest. -func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, error) { +// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, +// or finally, from a fetched manifest. +func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { + if instanceDigest != nil { + return *instanceDigest, nil + } if digested, ok := s.ref.ref.(reference.Digested); ok { d := digested.Digest() if d.Algorithm() == digest.Canonical { @@ -206,8 +218,8 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, // getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, // which is not nil. -func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx) +func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { return nil, err } @@ -276,8 +288,8 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( } // getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. 
-func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx) +func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/docker/docker_transport.go index 1d67cc4f..cc0aa298 100644 --- a/vendor/github.com/containers/image/docker/docker_transport.go +++ b/vendor/github.com/containers/image/docker/docker_transport.go @@ -122,11 +122,12 @@ func (ref dockerReference) PolicyConfigurationNamespaces() []string { return policyconfiguration.DockerReferenceNamespaces(ref.ref) } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { return newImage(ctx, ref) } diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go index 72c85c70..eb11ca86 100644 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/docker/tarfile/dest.go @@ -11,6 +11,7 @@ import ( "time" "github.com/containers/image/docker/reference" + "github.com/containers/image/internal/tmpdir" "github.com/containers/image/manifest" "github.com/containers/image/types" "github.com/opencontainers/go-digest" @@ -18,8 +19,6 @@ import ( "github.com/sirupsen/logrus" ) -const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. - // Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. type Destination struct { writer io.Writer @@ -107,7 +106,7 @@ func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size. logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") - streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-tarfile-blob") + streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob") if err != nil { return types.BlobInfo{}, err } @@ -168,7 +167,7 @@ func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { func (d *Destination) PutManifest(m []byte) error { // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, // so the caller trying a different manifest kind would be pointless. 
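Both the daemon and tarfile code paths now take their big-file scratch directory from the shared internal/tmpdir helper rather than per-file constants. The rationale is unchanged: layer-sized temporaries should not land in os.TempDir(), which is frequently a tmpfs under systemd. A hypothetical equivalent of the call site, with a local stand-in for the helper:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// temporaryDirectoryForBigFiles stands in for
// internal/tmpdir.TemporaryDirectoryForBigFiles(); /var/tmp is the value the
// removed per-file constants used, deliberately not os.TempDir().
func temporaryDirectoryForBigFiles() string {
	return "/var/tmp"
}

func main() {
	// Stream a blob of unknown size to disk first, as PutBlob does.
	streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles(), "docker-tarfile-blob")
	if err != nil {
		panic(err)
	}
	defer os.Remove(streamCopy.Name())
	defer streamCopy.Close()
	fmt.Println("buffering to", streamCopy.Name())
}
```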
- var man schema2Manifest + var man manifest.Schema2 if err := json.Unmarshal(m, &man); err != nil { return errors.Wrap(err, "Error parsing manifest") } @@ -177,12 +176,12 @@ func (d *Destination) PutManifest(m []byte) error { } layerPaths := []string{} - for _, l := range man.Layers { + for _, l := range man.LayersDescriptors { layerPaths = append(layerPaths, l.Digest.String()) } items := []ManifestItem{{ - Config: man.Config.Digest.String(), + Config: man.ConfigDescriptor.Digest.String(), RepoTags: []string{d.repoTag}, Layers: layerPaths, Parent: "", diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go index f77cb713..a18e2105 100644 --- a/vendor/github.com/containers/image/docker/tarfile/src.go +++ b/vendor/github.com/containers/image/docker/tarfile/src.go @@ -24,8 +24,8 @@ type Source struct { tarManifest *ManifestItem // nil if not available yet. configBytes []byte configDigest digest.Digest - orderedDiffIDList []diffID - knownLayers map[diffID]*layerInfo + orderedDiffIDList []digest.Digest + knownLayers map[digest.Digest]*layerInfo // Other state generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. } @@ -156,7 +156,7 @@ func (s *Source) ensureCachedDataIsPresent() error { if err != nil { return err } - var parsedConfig image // Most fields ommitted, we only care about layer DiffIDs. + var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) } @@ -194,12 +194,12 @@ func (s *Source) LoadTarManifest() ([]ManifestItem, error) { return s.loadTarManifest() } -func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) { +func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { // Collect layer data available in manifest and config. if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) } - knownLayers := map[diffID]*layerInfo{} + knownLayers := map[digest.Digest]*layerInfo{} unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. for i, diffID := range parsedConfig.RootFS.DiffIDs { if _, ok := knownLayers[diffID]; ok { @@ -249,28 +249,34 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *Source) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *Source) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(nil) has returned a manifest.DockerV2Schema2MediaType. 
+ return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) + } if s.generatedManifest == nil { if err := s.ensureCachedDataIsPresent(); err != nil { return nil, "", err } - m := schema2Manifest{ + m := manifest.Schema2{ SchemaVersion: 2, MediaType: manifest.DockerV2Schema2MediaType, - Config: distributionDescriptor{ + ConfigDescriptor: manifest.Schema2Descriptor{ MediaType: manifest.DockerV2Schema2ConfigMediaType, Size: int64(len(s.configBytes)), Digest: s.configDigest, }, - Layers: []distributionDescriptor{}, + LayersDescriptors: []manifest.Schema2Descriptor{}, } for _, diffID := range s.orderedDiffIDList { li, ok := s.knownLayers[diffID] if !ok { return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) } - m.Layers = append(m.Layers, distributionDescriptor{ - Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball + m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ + Digest: diffID, // diffID is a digest of the uncompressed tarball MediaType: manifest.DockerV2Schema2LayerMediaType, Size: li.size, }) @@ -284,13 +290,6 @@ func (s *Source) GetManifest() ([]byte, string, error) { return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil } -// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest -// out of a manifest list. -func (s *Source) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - // How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType. - return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) -} - type readCloseWrapper struct { io.Reader closeFunc func() error @@ -313,7 +312,7 @@ func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil } - if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball, + if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, stream, err := s.openTarComponent(li.path) if err != nil { return nil, 0, err @@ -355,6 +354,13 @@ func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. -func (s *Source) GetSignatures(ctx context.Context) ([][]byte, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(nil) has returned a manifest.DockerV2Schema2MediaType. 
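`GetTargetManifest` is folded into `GetManifest`: a single `instanceDigest` parameter now distinguishes "the primary manifest (possibly a list)" from "one instance of that list". A sketch of driving both paths through the merged signature (helper name is ours):

```go
package dockerimage

import (
	"fmt"

	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
)

// fetchBoth reads the primary manifest and then one list instance; the second
// call is what used to be spelled GetTargetManifest(instance).
func fetchBoth(src types.ImageSource, instance digest.Digest) error {
	primary, mt, err := src.GetManifest(nil)
	if err != nil {
		return err
	}
	fmt.Printf("primary: %d bytes, %s\n", len(primary), mt)

	single, mt, err := src.GetManifest(&instance)
	if err != nil {
		return err // e.g. docker-daemon: sources reject any non-nil digest
	}
	fmt.Printf("instance %s: %d bytes, %s\n", instance, len(single), mt)
	return nil
}
```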
+ return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) + } return [][]byte{}, nil } diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go index f16cc8c6..2aa56754 100644 --- a/vendor/github.com/containers/image/docker/tarfile/types.go +++ b/vendor/github.com/containers/image/docker/tarfile/types.go @@ -1,6 +1,9 @@ package tarfile -import "github.com/opencontainers/go-digest" +import ( + "github.com/containers/image/manifest" + "github.com/opencontainers/go-digest" +) // Various data structures. @@ -18,37 +21,8 @@ type ManifestItem struct { Config string RepoTags []string Layers []string - Parent imageID `json:",omitempty"` - LayerSources map[diffID]distributionDescriptor `json:",omitempty"` + Parent imageID `json:",omitempty"` + LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"` } type imageID string -type diffID digest.Digest - -// Based on github.com/docker/distribution/blobs.go -type distributionDescriptor struct { - MediaType string `json:"mediaType,omitempty"` - Size int64 `json:"size,omitempty"` - Digest digest.Digest `json:"digest,omitempty"` - URLs []string `json:"urls,omitempty"` -} - -// Based on github.com/docker/distribution/manifest/schema2/manifest.go -// FIXME: We are repeating this all over the place; make a public copy? -type schema2Manifest struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType,omitempty"` - Config distributionDescriptor `json:"config"` - Layers []distributionDescriptor `json:"layers"` -} - -// Based on github.com/docker/docker/image/image.go -// MOST CONTENT OMITTED AS UNNECESSARY -type image struct { - RootFS *rootFS `json:"rootfs,omitempty"` -} - -type rootFS struct { - Type string `json:"type"` - DiffIDs []diffID `json:"diff_ids,omitempty"` -} diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go index c79adacc..412261dd 100644 --- a/vendor/github.com/containers/image/image/docker_list.go +++ b/vendor/github.com/containers/image/image/docker_list.go @@ -2,6 +2,7 @@ package image import ( "encoding/json" + "fmt" "runtime" "github.com/containers/image/manifest" @@ -21,7 +22,7 @@ type platformSpec struct { // A manifestDescriptor references a platform-specific manifest. type manifestDescriptor struct { - descriptor + manifest.Schema2Descriptor Platform platformSpec `json:"platform"` } @@ -31,22 +32,36 @@ type manifestList struct { Manifests []manifestDescriptor `json:"manifests"` } -func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (genericManifest, error) { - list := manifestList{} - if err := json.Unmarshal(manblob, &list); err != nil { - return nil, err +// chooseDigestFromManifestList parses blob as a schema2 manifest list, +// and returns the digest of the image appropriate for the current environment. 
+func chooseDigestFromManifestList(ctx *types.SystemContext, blob []byte) (digest.Digest, error) { + wantedArch := runtime.GOARCH + if ctx != nil && ctx.ArchitectureChoice != "" { + wantedArch = ctx.ArchitectureChoice + } + wantedOS := runtime.GOOS + if ctx != nil && ctx.OSChoice != "" { + wantedOS = ctx.OSChoice + } + + list := manifestList{} + if err := json.Unmarshal(blob, &list); err != nil { + return "", err } - var targetManifestDigest digest.Digest for _, d := range list.Manifests { - if d.Platform.Architecture == runtime.GOARCH && d.Platform.OS == runtime.GOOS { - targetManifestDigest = d.Digest - break + if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { + return d.Digest, nil } } - if targetManifestDigest == "" { - return nil, errors.New("no supported platform found in manifest list") + return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) +} + +func manifestSchema2FromManifestList(ctx *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + targetManifestDigest, err := chooseDigestFromManifestList(ctx, manblob) + if err != nil { + return nil, err } - manblob, mt, err := src.GetTargetManifest(targetManifestDigest) + manblob, mt, err := src.GetManifest(&targetManifestDigest) if err != nil { return nil, err } @@ -59,5 +74,20 @@ func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (gen return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) } - return manifestInstanceFromBlob(src, manblob, mt) + return manifestInstanceFromBlob(ctx, src, manblob, mt) +} + +// ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate +// for the current system from the manifest available from src. +func ChooseManifestInstanceFromManifestList(ctx *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) { + // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later, + // probably along with manifest list editing. + blob, mt, err := src.Manifest() + if err != nil { + return "", err + } + if mt != manifest.DockerV2ListMediaType { + return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt) + } + return chooseDigestFromManifestList(ctx, blob) } diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go index 4152b3cd..c6a6989d 100644 --- a/vendor/github.com/containers/image/image/docker_schema1.go +++ b/vendor/github.com/containers/image/image/docker_schema1.go @@ -2,9 +2,6 @@ package image import ( "encoding/json" - "regexp" - "strings" - "time" "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" @@ -14,87 +11,25 @@ import ( "github.com/pkg/errors" ) -var ( - validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) -) - -type fsLayersSchema1 struct { - BlobSum digest.Digest `json:"blobSum"` -} - -type historySchema1 struct { - V1Compatibility string `json:"v1Compatibility"` -} - -// historySchema1 is a string containing this. It is similar to v1Image but not the same, in particular note the ThrowAway field. 
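`chooseDigestFromManifestList` consults the new `SystemContext.OSChoice`/`ArchitectureChoice` overrides before falling back to `runtime.GOOS`/`runtime.GOARCH`. A sketch of selecting a non-native list entry through the exported helper (the function name `pickInstance` is ours):

```go
package dockerimage

import (
	"fmt"

	"github.com/containers/image/image"
	"github.com/containers/image/types"
)

// pickInstance resolves the linux/arm64 entry of a manifest list regardless
// of the platform this binary runs on.
func pickInstance(unparsed types.UnparsedImage) error {
	ctx := &types.SystemContext{
		OSChoice:           "linux",
		ArchitectureChoice: "arm64", // overrides runtime.GOARCH in chooseDigestFromManifestList
	}
	d, err := image.ChooseManifestInstanceFromManifestList(ctx, unparsed)
	if err != nil {
		return err // "no image found in manifest list for architecture arm64, OS linux"
	}
	fmt.Println("selected instance:", d)
	return nil
}
```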
-type v1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig struct { - Cmd []string - } `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` -} - type manifestSchema1 struct { - Name string `json:"name"` - Tag string `json:"tag"` - Architecture string `json:"architecture"` - FSLayers []fsLayersSchema1 `json:"fsLayers"` - History []historySchema1 `json:"history"` - SchemaVersion int `json:"schemaVersion"` + m *manifest.Schema1 } -func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) { - mschema1 := &manifestSchema1{} - if err := json.Unmarshal(manifest, mschema1); err != nil { - return nil, err - } - if mschema1.SchemaVersion != 1 { - return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion) - } - if len(mschema1.FSLayers) != len(mschema1.History) { - return nil, errors.New("length of history not equal to number of layers") - } - if len(mschema1.FSLayers) == 0 { - return nil, errors.New("no FSLayers in manifest") - } - - if err := fixManifestLayers(mschema1); err != nil { - return nil, err - } - return mschema1, nil -} - -// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. -func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest { - var name, tag string - if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. - name = reference.Path(ref) - if tagged, ok := ref.(reference.NamedTagged); ok { - tag = tagged.Tag() - } - } - return &manifestSchema1{ - Name: name, - Tag: tag, - Architecture: architecture, - FSLayers: fsLayers, - History: history, - SchemaVersion: 1, - } -} - -func (m *manifestSchema1) serialize() ([]byte, error) { - // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. - unsigned, err := json.Marshal(*m) +func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema1FromManifest(manifestBlob) if err != nil { return nil, err } - return manifest.AddDummyV2S1Signature(unsigned) + return &manifestSchema1{m: m}, nil +} + +// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. +func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest { + return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)} +} + +func (m *manifestSchema1) serialize() ([]byte, error) { + return m.m.Serialize() } func (m *manifestSchema1) manifestMIMEType() string { @@ -104,7 +39,7 @@ func (m *manifestSchema1) manifestMIMEType() string { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestSchema1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{} + return m.m.ConfigInfo() } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. 
@@ -128,11 +63,7 @@ func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestSchema1) LayerInfos() []types.BlobInfo { - layers := make([]types.BlobInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} - } - return layers + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -153,22 +84,11 @@ func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) } else { tag = "" } - return m.Name != name || m.Tag != tag + return m.m.Name != name || m.m.Tag != tag } func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) { - v1 := &v1Image{} - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - Tag: m.Tag, - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - }, nil + return m.m.Inspect(nil) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -181,25 +101,18 @@ func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m + copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} if options.LayerInfos != nil { - // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. - if len(copy.FSLayers) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos)) - } - for i, info := range options.LayerInfos { - // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, - // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. - // So, we don't bother recomputing the IDs in m.History.V1Compatibility. - copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } if options.EmbeddedDockerReference != nil { - copy.Name = reference.Path(options.EmbeddedDockerReference) + copy.m.Name = reference.Path(options.EmbeddedDockerReference) if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { - copy.Tag = tagged.Tag() + copy.m.Tag = tagged.Tag() } else { - copy.Tag = "" + copy.m.Tag = "" } } @@ -209,7 +122,21 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, // handle conversions between them by doing nothing. 
case manifest.DockerV2Schema2MediaType: - return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) + m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) + if err != nil { + return nil, err + } + return memoryImageFromManifest(m2), nil + case imgspecv1.MediaTypeImageManifest: + // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest + m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) + if err != nil { + return nil, err + } + return m2.UpdatedImage(types.ManifestUpdateOptions{ + ManifestMIMEType: imgspecv1.MediaTypeImageManifest, + InformationOnly: options.InformationOnly, + }) default: return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType) } @@ -217,102 +144,32 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ return memoryImageFromManifest(©), nil } -// fixManifestLayers, after validating the supplied manifest -// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History), -// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates, -// both from manifest.History and manifest.FSLayers). -// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries -// (for Dockerfile operations which change the configuration but not the filesystem). -func fixManifestLayers(manifest *manifestSchema1) error { - type imageV1 struct { - ID string - Parent string - } - // Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History) - imgs := make([]*imageV1, len(manifest.FSLayers)) - for i := range manifest.FSLayers { - img := &imageV1{} - - if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil { - return err - } - - imgs[i] = img - if err := validateV1ID(img.ID); err != nil { - return err - } - } - if imgs[len(imgs)-1].Parent != "" { - return errors.New("Invalid parent ID in the base layer of the image") - } - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - var lastID string - for _, img := range imgs { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - // backwards loop so that we keep the remaining indexes after removing items - for i := len(imgs) - 2; i >= 0; i-- { - if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue - manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...) - manifest.History = append(manifest.History[:i], manifest.History[i+1:]...) - } else if imgs[i].Parent != imgs[i+1].ID { - return errors.Errorf("Invalid parent ID. 
Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) - } - } - return nil -} - -func validateV1ID(id string) error { - if ok := validHex.MatchString(id); !ok { - return errors.Errorf("image ID %q is invalid", id) - } - return nil -} - // Based on github.com/docker/docker/distribution/pull_v2.go -func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) { - if len(m.History) == 0 { +func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) { + if len(m.m.History) == 0 { // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) } - if len(m.History) != len(m.FSLayers) { - return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers)) + if len(m.m.History) != len(m.m.FSLayers) { + return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers)) } - if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) { - return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers)) + if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) } - if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) { - return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers)) + if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) } - rootFS := rootFS{ - Type: "layers", - DiffIDs: []digest.Digest{}, - BaseLayer: "", - } - var layers []descriptor - history := make([]imageHistory, len(m.History)) - for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- { - v2Index := (len(m.History) - 1) - v1Index + // Build a list of the diffIDs for the non-empty layers. 
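The new `imgspecv1.MediaTypeImageManifest` case two hunks above makes schema1 → OCI conversion work transitively through schema2. Requesting it through the public `types.Image` API might look like this sketch; the `InformationOnly` inputs are only needed for schema1 sources, as the surrounding code shows:

```go
package dockerimage

import (
	"github.com/containers/image/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// toOCI converts an image manifest to OCI; for docker/distribution schema 1
// inputs this now routes through the schema1 -> schema2 -> OCI chain.
func toOCI(img types.Image, info types.ManifestUpdateInformation) (types.Image, error) {
	return img.UpdatedImage(types.ManifestUpdateOptions{
		ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
		InformationOnly:  info, // LayerInfos/LayerDiffIDs for schema1 sources
	})
}
```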
+ diffIDs := []digest.Digest{} + var layers []manifest.Schema2Descriptor + for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.History) - 1) - v1Index - var v1compat v1Compatibility - if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil { + var v1compat manifest.Schema1V1Compatibility + if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil { return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index) } - history[v2Index] = imageHistory{ - Created: v1compat.Created, - Author: v1compat.Author, - CreatedBy: strings.Join(v1compat.ContainerConfig.Cmd, " "), - Comment: v1compat.Comment, - EmptyLayer: v1compat.ThrowAway, - } - if !v1compat.ThrowAway { var size int64 if uploadedLayerInfos != nil { @@ -322,54 +179,23 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl if layerDiffIDs != nil { d = layerDiffIDs[v2Index] } - layers = append(layers, descriptor{ + layers = append(layers, manifest.Schema2Descriptor{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: size, - Digest: m.FSLayers[v1Index].BlobSum, + Digest: m.m.FSLayers[v1Index].BlobSum, }) - rootFS.DiffIDs = append(rootFS.DiffIDs, d) + diffIDs = append(diffIDs, d) } } - configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history) + configJSON, err := m.m.ToSchema2(diffIDs) if err != nil { return nil, err } - configDescriptor := descriptor{ + configDescriptor := manifest.Schema2Descriptor{ MediaType: "application/vnd.docker.container.image.v1+json", Size: int64(len(configJSON)), Digest: digest.FromBytes(configJSON), } - m2 := manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers) - return memoryImageFromManifest(m2), nil -} - -func configJSONFromV1Config(v1ConfigJSON []byte, rootFS rootFS, history []imageHistory) ([]byte, error) { - // github.com/docker/docker/image/v1/imagev1.go:MakeConfigFromV1Config unmarshals and re-marshals the input if docker_version is < 1.8.3 to remove blank fields; - // we don't do that here. FIXME? Should we? AFAICT it would only affect the digest value of the schema2 manifest, and we don't particularly need that to be - // a consistently reproducible value. - - // Preserve everything we don't specifically know about. - // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) - rawContents := map[string]*json.RawMessage{} - if err := json.Unmarshal(v1ConfigJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! 
- return nil, err - } - - delete(rawContents, "id") - delete(rawContents, "parent") - delete(rawContents, "Size") - delete(rawContents, "parent_id") - delete(rawContents, "layer_id") - delete(rawContents, "throwaway") - - updates := map[string]interface{}{"rootfs": rootFS, "history": history} - for field, value := range updates { - encoded, err := json.Marshal(value) - if err != nil { - return nil, err - } - rawContents[field] = (*json.RawMessage)(&encoded) - } - return json.Marshal(rawContents) + return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil } diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go index 8cc3c495..b43bc17c 100644 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ b/vendor/github.com/containers/image/image/docker_schema2.go @@ -29,54 +29,44 @@ var gzippedEmptyLayer = []byte{ // gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") -type descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} - type manifestSchema2 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - ConfigDescriptor descriptor `json:"config"` - LayersDescriptors []descriptor `json:"layers"` + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. + m *manifest.Schema2 } -func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { - v2s2 := manifestSchema2{src: src} - if err := json.Unmarshal(manifest, &v2s2); err != nil { +func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema2FromManifest(manifestBlob) + if err != nil { return nil, err } - return &v2s2, nil + return &manifestSchema2{ + src: src, + m: m, + }, nil } // manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: -func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest { +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest { return &manifestSchema2{ - src: src, - configBlob: configBlob, - SchemaVersion: 2, - MediaType: manifest.DockerV2Schema2MediaType, - ConfigDescriptor: config, - LayersDescriptors: layers, + src: src, + configBlob: configBlob, + m: manifest.Schema2FromComponents(config, layers), } } func (m *manifestSchema2) serialize() ([]byte, error) { - return json.Marshal(*m) + return m.m.Serialize() } func (m *manifestSchema2) manifestMIMEType() string { - return m.MediaType + return m.m.MediaType } // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. 
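The descriptor plumbing deleted above reappears as the exported `manifest.Schema2`, so schema2 parsing is no longer private to the `image` package. A runnable sketch (the JSON literal is illustrative):

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	blob := []byte(`{
	  "schemaVersion": 2,
	  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
	  "config": {
	    "mediaType": "application/vnd.docker.container.image.v1+json",
	    "size": 7023,
	    "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
	  },
	  "layers": []
	}`)
	m, err := manifest.Schema2FromManifest(blob)
	if err != nil {
		panic(err)
	}
	fmt.Println("config:", m.ConfigDescriptor.Digest, "layers:", len(m.LayersDescriptors))
}
```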
func (m *manifestSchema2) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} + return m.m.ConfigInfo() } // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about @@ -105,9 +95,9 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") } stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.ConfigDescriptor.Digest, - Size: m.ConfigDescriptor.Size, - URLs: m.ConfigDescriptor.URLs, + Digest: m.m.ConfigDescriptor.Digest, + Size: m.m.ConfigDescriptor.Size, + URLs: m.m.ConfigDescriptor.URLs, }) if err != nil { return nil, err @@ -118,8 +108,8 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { return nil, err } computedDigest := digest.FromBytes(blob) - if computedDigest != m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) + if computedDigest != m.m.ConfigDescriptor.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) } m.configBlob = blob } @@ -130,15 +120,7 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestSchema2) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{ - Digest: layer.Digest, - Size: layer.Size, - URLs: layer.URLs, - }) - } - return blobs + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -149,21 +131,18 @@ func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) } func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) { - config, err := m.ConfigBlob() - if err != nil { - return nil, err + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob() + if err != nil { + return nil, err + } + return config, nil } - v1 := &v1Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - }, nil + return m.m.Inspect(getter) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -176,17 +155,14 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m // NOTE: This is not a deep copy, it still shares slices etc. + copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. 
+ src: m.src, + configBlob: m.configBlob, + m: manifest.Schema2Clone(m.m), + } if options.LayerInfos != nil { - if len(copy.LayersDescriptors) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) - } - copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos)) - for i, info := range options.LayerInfos { - copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType - copy.LayersDescriptors[i].Digest = info.Digest - copy.LayersDescriptors[i].Size = info.Size - copy.LayersDescriptors[i].URLs = info.URLs + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. @@ -204,6 +180,15 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ return memoryImageFromManifest(©), nil } +func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { + return imgspecv1.Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) { configOCI, err := m.OCIConfig() if err != nil { @@ -214,18 +199,16 @@ func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) { return nil, err } - config := descriptorOCI1{ - descriptor: descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - }, + config := imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + Size: int64(len(configOCIBytes)), + Digest: digest.FromBytes(configOCIBytes), } - layers := make([]descriptorOCI1, len(m.LayersDescriptors)) + layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) for idx := range layers { - layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]} - if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { + layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) + if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable } else { // we assume layers are gzip'ed because docker v2s2 only deals with @@ -244,14 +227,14 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) if err != nil { return nil, err } - imageConfig := &image{} + imageConfig := &manifest.Schema2Image{} if err := json.Unmarshal(configBytes, imageConfig); err != nil { return nil, err } // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. 
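`imageInspectInfo` (a hunk above) now feeds `manifest.Schema2.Inspect` a callback that fetches the config blob on demand instead of decoding it inline. The same contract used standalone, as a hedged sketch; the blob map stands in for whatever store actually holds the config:

```go
package dockerimage

import (
	"fmt"

	"github.com/containers/image/manifest"
	"github.com/containers/image/types"
)

// inspectFromCache serves the config blob to Inspect from a local map, the
// way imageInspectInfo serves it from ConfigBlob().
func inspectFromCache(m *manifest.Schema2, blobs map[string][]byte) (*types.ImageInspectInfo, error) {
	getter := func(info types.BlobInfo) ([]byte, error) {
		blob, ok := blobs[info.Digest.String()]
		if !ok {
			return nil, fmt.Errorf("blob %s not cached", info.Digest)
		}
		return blob, nil
	}
	return m.Inspect(getter)
}
```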
- fsLayers := make([]fsLayersSchema1, len(imageConfig.History)) - history := make([]historySchema1, len(imageConfig.History)) + fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) + history := make([]manifest.Schema1History, len(imageConfig.History)) nonemptyLayerIndex := 0 var parentV1ID string // Set in the loop v1ID := "" @@ -279,10 +262,10 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) } blobDigest = gzippedEmptyLayerDigest } else { - if nonemptyLayerIndex >= len(m.LayersDescriptors) { - return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors)) + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) } - blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest nonemptyLayerIndex++ } @@ -293,7 +276,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) } v1ID = v - fakeImage := v1Compatibility{ + fakeImage := manifest.Schema1V1Compatibility{ ID: v1ID, Parent: parentV1ID, Comment: historyEntry.Comment, @@ -307,8 +290,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) } - fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest} - history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)} + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} // Note that parentV1ID of the top layer is preserved when exiting this loop } diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go index 75c9e711..cdd4233f 100644 --- a/vendor/github.com/containers/image/image/manifest.go +++ b/vendor/github.com/containers/image/image/manifest.go @@ -1,57 +1,14 @@ package image import ( - "time" + "fmt" "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" - "github.com/containers/image/pkg/strslice" "github.com/containers/image/types" - "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) -type config struct { - Cmd strslice.StrSlice - Labels map[string]string -} - -type v1Image struct { - ID string `json:"id,omitempty"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig *config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *config `json:"config,omitempty"` - // Architecture is the hardware that the image is build and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` -} - -type image struct { - v1Image - History []imageHistory `json:"history,omitempty"` - RootFS *rootFS `json:"rootfs,omitempty"` -} - -type imageHistory struct { - Created time.Time `json:"created"` - Author string `json:"author,omitempty"` - CreatedBy string `json:"created_by,omitempty"` - Comment string `json:"comment,omitempty"` - EmptyLayer 
bool `json:"empty_layer,omitempty"` -} - -type rootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` - BaseLayer string `json:"base_layer,omitempty"` -} - // genericManifest is an interface for parsing, modifying image manifests and related data. // Note that the public methods are intended to be a subset of types.Image // so that embedding a genericManifest into structs works. @@ -87,43 +44,24 @@ type genericManifest interface { UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) } -func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { - switch mt { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json": +// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. +// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. +func manifestInstanceFromBlob(ctx *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { + switch manifest.NormalizedMIMEType(mt) { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: return manifestSchema1FromManifest(manblob) case imgspecv1.MediaTypeImageManifest: return manifestOCI1FromManifest(src, manblob) case manifest.DockerV2Schema2MediaType: return manifestSchema2FromManifest(src, manblob) case manifest.DockerV2ListMediaType: - return manifestSchema2FromManifestList(src, manblob) - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return manifestSchema1FromManifest(manblob) + return manifestSchema2FromManifestList(ctx, src, manblob) + default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. 
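The media-type guessing that used to live in the `default:` branch moves into `manifest.NormalizedMIMEType`, so the switch above only sees canonical values and its own `default:` should be unreachable. A sketch of what that means for callers; per this revision, unrecognized values fall back to a schema1 MIME type:

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	for _, mt := range []string{
		"application/json", // a valid v2s1 value per docker/distribution
		"text/plain",       // CDN-mangled registry responses, per the deleted comment
		manifest.DockerV2Schema2MediaType,
	} {
		fmt.Printf("%q -> %q\n", mt, manifest.NormalizedMIMEType(mt))
	}
}
```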
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) } } // inspectManifest is an implementation of types.Image.Inspect func inspectManifest(m genericManifest) (*types.ImageInspectInfo, error) { - info, err := m.imageInspectInfo() - if err != nil { - return nil, err - } - layers := m.LayerInfos() - info.Layers = make([]string, len(layers)) - for i, layer := range layers { - info.Layers[i] = layer.Digest.String() - } - return info, nil + return m.imageInspectInfo() } diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go index 62995f61..4639c49a 100644 --- a/vendor/github.com/containers/image/image/memory.go +++ b/vendor/github.com/containers/image/image/memory.go @@ -33,11 +33,6 @@ func (i *memoryImage) Reference() types.ImageReference { return nil } -// Close removes resources associated with an initialized UnparsedImage, if any. -func (i *memoryImage) Close() error { - return nil -} - // Size returns the size of the image as stored, if known, or -1 if not. func (i *memoryImage) Size() (int64, error) { return -1, nil @@ -67,7 +62,9 @@ func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) { return inspectManifest(i.genericManifest) } -// IsMultiImage returns true if the image's manifest is a list of images, false otherwise. -func (i *memoryImage) IsMultiImage() bool { - return false +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (i *memoryImage) LayerInfosForCopy() []types.BlobInfo { + return nil } diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go index 5f7c0728..3c03e49b 100644 --- a/vendor/github.com/containers/image/image/oci.go +++ b/vendor/github.com/containers/image/image/oci.go @@ -12,41 +12,34 @@ import ( "github.com/pkg/errors" ) -type descriptorOCI1 struct { - descriptor - Annotations map[string]string `json:"annotations,omitempty"` -} - type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - SchemaVersion int `json:"schemaVersion"` - ConfigDescriptor descriptorOCI1 `json:"config"` - LayersDescriptors []descriptorOCI1 `json:"layers"` - Annotations map[string]string `json:"annotations,omitempty"` + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of m.Config. 
+ m *manifest.OCI1 } -func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { - oci := manifestOCI1{src: src} - if err := json.Unmarshal(manifest, &oci); err != nil { +func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.OCI1FromManifest(manifestBlob) + if err != nil { return nil, err } - return &oci, nil + return &manifestOCI1{ + src: src, + m: m, + }, nil } // manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest { +func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { return &manifestOCI1{ - src: src, - configBlob: configBlob, - SchemaVersion: 2, - ConfigDescriptor: config, - LayersDescriptors: layers, + src: src, + configBlob: configBlob, + m: manifest.OCI1FromComponents(config, layers), } } func (m *manifestOCI1) serialize() ([]byte, error) { - return json.Marshal(*m) + return m.m.Serialize() } func (m *manifestOCI1) manifestMIMEType() string { @@ -56,7 +49,7 @@ func (m *manifestOCI1) manifestMIMEType() string { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestOCI1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, Annotations: m.ConfigDescriptor.Annotations} + return m.m.ConfigInfo() } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. @@ -67,9 +60,9 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") } stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.ConfigDescriptor.Digest, - Size: m.ConfigDescriptor.Size, - URLs: m.ConfigDescriptor.URLs, + Digest: m.m.Config.Digest, + Size: m.m.Config.Size, + URLs: m.m.Config.URLs, }) if err != nil { return nil, err @@ -80,8 +73,8 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) { return nil, err } computedDigest := digest.FromBytes(blob) - if computedDigest != m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) + if computedDigest != m.m.Config.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) } m.configBlob = blob } @@ -107,11 +100,7 @@ func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs}) - } - return blobs + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. 
@@ -122,21 +111,18 @@ func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) boo } func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) { - config, err := m.ConfigBlob() - if err != nil { - return nil, err + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob() + if err != nil { + return nil, err + } + return config, nil } - v1 := &v1Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - }, nil + return m.m.Inspect(getter) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -149,18 +135,14 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m // NOTE: This is not a deep copy, it still shares slices etc. + copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. + src: m.src, + configBlob: m.configBlob, + m: manifest.OCI1Clone(m.m), + } if options.LayerInfos != nil { - if len(copy.LayersDescriptors) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) - } - copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos)) - for i, info := range options.LayerInfos { - copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType - copy.LayersDescriptors[i].Digest = info.Digest - copy.LayersDescriptors[i].Size = info.Size - copy.LayersDescriptors[i].Annotations = info.Annotations - copy.LayersDescriptors[i].URLs = info.URLs + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. @@ -176,17 +158,26 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types. return memoryImageFromManifest(©), nil } +func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { + return manifest.Schema2Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { // Create a copy of the descriptor. - config := m.ConfigDescriptor.descriptor + config := schema2DescriptorFromOCI1Descriptor(m.m.Config) // The only difference between OCI and DockerSchema2 is the mediatypes. The // media type of the manifest is handled by manifestSchema2FromComponents. 
config.MediaType = manifest.DockerV2Schema2ConfigMediaType - layers := make([]descriptor, len(m.LayersDescriptors)) + layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) for idx := range layers { - layers[idx] = m.LayersDescriptors[idx].descriptor + layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType } diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go index ef35b3c3..3477f341 100644 --- a/vendor/github.com/containers/image/image/sourced.go +++ b/vendor/github.com/containers/image/image/sourced.go @@ -4,12 +4,22 @@ package image import ( - "github.com/containers/image/manifest" "github.com/containers/image/types" ) -// FromSource returns a types.Image implementation for source. -// The caller must call .Close() on the returned Image. +// imageCloser implements types.ImageCloser, perhaps allowing simple users +// to use a single object without having keep a reference to a types.ImageSource +// only to call types.ImageSource.Close(). +type imageCloser struct { + types.Image + src types.ImageSource +} + +// FromSource returns a types.ImageCloser implementation for the default instance of source. +// If source is a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate image instance. +// +// The caller must call .Close() on the returned ImageCloser. // // FromSource “takes ownership” of the input ImageSource and will call src.Close() // when the image is closed. (This does not prevent callers from using both the @@ -18,8 +28,19 @@ import ( // // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. -func FromSource(src types.ImageSource) (types.Image, error) { - return FromUnparsedImage(UnparsedFromSource(src)) +func FromSource(ctx *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { + img, err := FromUnparsedImage(ctx, UnparsedInstance(src, nil)) + if err != nil { + return nil, err + } + return &imageCloser{ + Image: img, + src: src, + }, nil +} + +func (ic *imageCloser) Close() error { + return ic.src.Close() } // sourcedImage is a general set of utilities for working with container images, @@ -38,27 +59,22 @@ type sourcedImage struct { } // FromUnparsedImage returns a types.Image implementation for unparsed. -// The caller must call .Close() on the returned Image. +// If unparsed represents a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate single image. // -// FromSource “takes ownership” of the input UnparsedImage and will call uparsed.Close() -// when the image is closed. (This does not prevent callers from using both the -// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to -// keep a reference to the Image.) -func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) { +// The Image must not be used after the underlying ImageSource is Close()d. +func FromUnparsedImage(ctx *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: // we want to be able to use unparsed.src. 
We could make that an explicit interface, but, well, // this is the only UnparsedImage implementation around, anyway. - // Also, we do not explicitly implement types.Image.Close; we let the implementation fall through to - // unparsed.Close. - // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). manifestBlob, manifestMIMEType, err := unparsed.Manifest() if err != nil { return nil, err } - parsedManifest, err := manifestInstanceFromBlob(unparsed.src, manifestBlob, manifestMIMEType) + parsedManifest, err := manifestInstanceFromBlob(ctx, unparsed.src, manifestBlob, manifestMIMEType) if err != nil { return nil, err } @@ -85,6 +101,6 @@ func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) { return inspectManifest(i.genericManifest) } -func (i *sourcedImage) IsMultiImage() bool { - return i.manifestMIMEType == manifest.DockerV2ListMediaType +func (i *sourcedImage) LayerInfosForCopy() []types.BlobInfo { + return i.UnparsedImage.LayerInfosForCopy() } diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go index 483cfd04..aff06d8a 100644 --- a/vendor/github.com/containers/image/image/unparsed.go +++ b/vendor/github.com/containers/image/image/unparsed.go @@ -11,8 +11,10 @@ import ( ) // UnparsedImage implements types.UnparsedImage . +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. type UnparsedImage struct { src types.ImageSource + instanceDigest *digest.Digest cachedManifest []byte // A private cache for Manifest(); nil if not yet known. // A private cache for Manifest(), may be the empty string if guessing failed. // Valid iff cachedManifest is not nil. @@ -20,49 +22,41 @@ type UnparsedImage struct { cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. } -// UnparsedFromSource returns a types.UnparsedImage implementation for source. -// The caller must call .Close() on the returned UnparsedImage. +// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). // -// UnparsedFromSource “takes ownership” of the input ImageSource and will call src.Close() -// when the image is closed. (This does not prevent callers from using both the -// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to -// keep a reference to the UnparsedImage.) -func UnparsedFromSource(src types.ImageSource) *UnparsedImage { - return &UnparsedImage{src: src} +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { + return &UnparsedImage{ + src: src, + instanceDigest: instanceDigest, + } } // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. func (i *UnparsedImage) Reference() types.ImageReference { + // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. 
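`UnparsedInstance` plus the two-argument `FromUnparsedImage`/`FromSource` are the new entry points. A sketch of both, assuming only the signatures shown in this patch (helper names are ours):

```go
package dockerimage

import (
	"github.com/containers/image/image"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
)

// instanceImage parses one member of a manifest list; passing nil instead of
// &instance addresses the primary manifest, as before. The result must not
// be used after src is Close()d.
func instanceImage(ctx *types.SystemContext, src types.ImageSource, instance digest.Digest) (types.Image, error) {
	return image.FromUnparsedImage(ctx, image.UnparsedInstance(src, &instance))
}

// defaultImage is the simple-caller path: FromSource takes ownership of src,
// and img.Close() releases both.
func defaultImage(ctx *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
	return image.FromSource(ctx, src)
}
```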
return i.src.Reference() } -// Close removes resources associated with an initialized UnparsedImage, if any. -func (i *UnparsedImage) Close() error { - return i.src.Close() -} - // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. func (i *UnparsedImage) Manifest() ([]byte, string, error) { if i.cachedManifest == nil { - m, mt, err := i.src.GetManifest() + m, mt, err := i.src.GetManifest(i.instanceDigest) if err != nil { return nil, "", err } // ImageSource.GetManifest does not do digest verification, but we do; // this immediately protects also any user of types.Image. - ref := i.Reference().DockerReference() - if ref != nil { - if canonical, ok := ref.(reference.Canonical); ok { - digest := digest.Digest(canonical.Digest()) - matches, err := manifest.MatchesDigest(m, digest) - if err != nil { - return nil, "", errors.Wrap(err, "Error computing manifest digest") - } - if !matches { - return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) - } + if digest, haveDigest := i.expectedManifestDigest(); haveDigest { + matches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return nil, "", errors.Wrap(err, "Error computing manifest digest") + } + if !matches { + return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) } } @@ -72,10 +66,26 @@ func (i *UnparsedImage) Manifest() ([]byte, string, error) { return i.cachedManifest, i.cachedManifestMIMEType, nil } +// expectedManifestDigest returns a the expected value of the manifest digest, and an indicator whether it is known. +// The bool return value seems redundant with digest != ""; it is used explicitly +// to refuse (unexpected) situations when the digest exists but is "". +func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { + if i.instanceDigest != nil { + return *i.instanceDigest, true + } + ref := i.Reference().DockerReference() + if ref != nil { + if canonical, ok := ref.(reference.Canonical); ok { + return canonical.Digest(), true + } + } + return "", false +} + // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { if i.cachedSignatures == nil { - sigs, err := i.src.GetSignatures(ctx) + sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) if err != nil { return nil, err } @@ -83,3 +93,10 @@ func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { } return i.cachedSignatures, nil } + +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (i *UnparsedImage) LayerInfosForCopy() []types.BlobInfo { + return i.src.LayerInfosForCopy() +} diff --git a/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go new file mode 100644 index 00000000..a28020ed --- /dev/null +++ b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go @@ -0,0 +1,19 @@ +package tmpdir + +import ( + "os" + "runtime" +) + +// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. 
+// On non-Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp +// which on systemd-based systems could be an unsuitable tmpfs filesystem. +func TemporaryDirectoryForBigFiles() string { + var temporaryDirectoryForBigFiles string + if runtime.GOOS == "windows" { + temporaryDirectoryForBigFiles = os.TempDir() + } else { + temporaryDirectoryForBigFiles = "/var/tmp" + } + return temporaryDirectoryForBigFiles +} diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go new file mode 100644 index 00000000..b1c1cfe9 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/docker_schema1.go @@ -0,0 +1,310 @@ +package manifest + +import ( + "encoding/json" + "regexp" + "strings" + "time" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" + "github.com/docker/docker/api/types/versions" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. +type Schema1FSLayers struct { + BlobSum digest.Digest `json:"blobSum"` +} + +// Schema1History is an entry of the "history" array in docker/distribution schema 1. +type Schema1History struct { + V1Compatibility string `json:"v1Compatibility"` +} + +// Schema1 is a manifest in docker/distribution schema 1. +type Schema1 struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []Schema1FSLayers `json:"fsLayers"` + History []Schema1History `json:"history"` + SchemaVersion int `json:"schemaVersion"` +} + +// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. +type Schema1V1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` +} + +// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. +// (NOTE: The instance is not necessarily a literal representation of the original blob, +// layers with duplicate IDs are eliminated.) +func Schema1FromManifest(manifest []byte) (*Schema1, error) { + s1 := Schema1{} + if err := json.Unmarshal(manifest, &s1); err != nil { + return nil, err + } + if s1.SchemaVersion != 1 { + return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) + } + if len(s1.FSLayers) != len(s1.History) { + return nil, errors.New("length of history not equal to number of layers") + } + if len(s1.FSLayers) == 0 { + return nil, errors.New("no FSLayers in manifest") + } + if err := s1.fixManifestLayers(); err != nil { + return nil, err + } + return &s1, nil +} + +// Schema1FromComponents creates a Schema1 manifest instance from the supplied data. +func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 { + var name, tag string + if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+ name = reference.Path(ref) + if tagged, ok := ref.(reference.NamedTagged); ok { + tag = tagged.Tag() + } + } + return &Schema1{ + Name: name, + Tag: tag, + Architecture: architecture, + FSLayers: fsLayers, + History: history, + SchemaVersion: 1, + } +} + +// Schema1Clone creates a copy of the supplied Schema1 manifest. +func Schema1Clone(src *Schema1) *Schema1 { + copy := *src + return &copy +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *Schema1) ConfigInfo() types.BlobInfo { + return types.BlobInfo{} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *Schema1) LayerInfos() []types.BlobInfo { + layers := make([]types.BlobInfo, len(m.FSLayers)) + for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) + layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} + } + return layers +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. + if len(m.FSLayers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) + } + for i, info := range layerInfos { + // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, + // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. + // So, we don't bother recomputing the IDs in m.History.V1Compatibility. + m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema1) Serialize() ([]byte, error) { + // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. + unsigned, err := json.Marshal(*m) + if err != nil { + return nil, err + } + return AddDummyV2S1Signature(unsigned) +} + +// fixManifestLayers, after validating the supplied manifest +// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), +// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, +// both from m.History and m.FSLayers). +// Note that even after this succeeds, m.FSLayers may contain duplicate entries +// (for Dockerfile operations which change the configuration but not the filesystem).
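To make the deduplication described above concrete (the function itself follows), here is a minimal sketch; the `demoDedup` helper and its synthetic IDs and blob sums are invented for illustration, and the only vendored API used is `manifest.Schema1FromManifest`, which runs `fixManifestLayers` internally:

```go
package example

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/containers/image/manifest"
)

// demoDedup builds a synthetic schema1 manifest whose two most recent history
// entries carry the same ID (a configuration-only change); parsing it runs
// fixManifestLayers, which collapses them into a single layer entry.
func demoDedup() error {
	idC := strings.Repeat("c", 64)
	idD := strings.Repeat("d", 64)
	hist := func(id, parent string) map[string]string {
		c := fmt.Sprintf(`{"id":%q`, id)
		if parent != "" {
			c += fmt.Sprintf(`,"parent":%q`, parent)
		}
		return map[string]string{"v1Compatibility": c + `}`}
	}
	doc := map[string]interface{}{
		"schemaVersion": 1,
		"fsLayers": []map[string]string{
			{"blobSum": "sha256:" + strings.Repeat("a", 64)},
			{"blobSum": "sha256:" + strings.Repeat("a", 64)},
			{"blobSum": "sha256:" + strings.Repeat("b", 64)},
		},
		"history": []map[string]string{
			hist(idC, idD), hist(idC, idD), hist(idD, ""),
		},
	}
	blob, err := json.Marshal(doc)
	if err != nil {
		return err
	}
	s1, err := manifest.Schema1FromManifest(blob)
	if err != nil {
		return err
	}
	fmt.Println(len(s1.FSLayers)) // 2: one of the duplicate entries was removed
	return nil
}
```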
+func (m *Schema1) fixManifestLayers() error { + type imageV1 struct { + ID string + Parent string + } + // Per the specification, we can assume that len(m.FSLayers) == len(m.History) + imgs := make([]*imageV1, len(m.FSLayers)) + for i := range m.FSLayers { + img := &imageV1{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := validateV1ID(img.ID); err != nil { + return err + } + } + if imgs[len(imgs)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image") + } + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) + } + } + return nil +} + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return errors.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + s1 := &Schema2V1Image{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { + return nil, err + } + return &types.ImageInspectInfo{ + Tag: m.Tag, + Created: s1.Created, + DockerVersion: s1.DockerVersion, + Labels: make(map[string]string), + Architecture: s1.Architecture, + Os: s1.OS, + Layers: LayerInfosToStrings(m.LayerInfos()), + }, nil +} + +// ToSchema2 builds a schema2-style configuration blob using the supplied diffIDs. +func (m *Schema1) ToSchema2(diffIDs []digest.Digest) ([]byte, error) { + // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields + // that aren't directly comparable using info from the manifest. + if len(m.History) == 0 { + return nil, errors.New("image has no layers") + } + s2 := struct { + Schema2Image + ID string `json:"id,omitempty"` + Parent string `json:"parent,omitempty"` + ParentID string `json:"parent_id,omitempty"` + LayerID string `json:"layer_id,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + Size int64 `json:",omitempty"` + }{} + config := []byte(m.History[0].V1Compatibility) + err := json.Unmarshal(config, &s2) + if err != nil { + return nil, errors.Wrapf(err, "error decoding configuration") + } + // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, + // adding some fields that aren't "omitempty". + if s2.DockerVersion != "" && versions.LessThan(s2.DockerVersion, "1.8.3") { + config, err = json.Marshal(&s2) + if err != nil { + return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s2) + } + } + // Build the history. 
+ convertedHistory := []Schema2History{} + for _, h := range m.History { + compat := Schema1V1Compatibility{} + if err := json.Unmarshal([]byte(h.V1Compatibility), &compat); err != nil { + return nil, errors.Wrapf(err, "error decoding history information") + } + hitem := Schema2History{ + Created: compat.Created, + CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), + Author: compat.Author, + Comment: compat.Comment, + EmptyLayer: compat.ThrowAway, + } + convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + } + // Build the rootfs information. We need the decompressed sums that we've been + // calculating to fill in the DiffIDs. It's expected (but not enforced by us) + // that the number of diffIDs corresponds to the number of non-EmptyLayer + // entries in the history. + rootFS := &Schema2RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + // And now for some raw manipulation. + raw := make(map[string]*json.RawMessage) + err = json.Unmarshal(config, &raw) + if err != nil { + return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s2) + } + // Drop some fields. + delete(raw, "id") + delete(raw, "parent") + delete(raw, "parent_id") + delete(raw, "layer_id") + delete(raw, "throwaway") + delete(raw, "Size") + // Add the history and rootfs information. + rootfs, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + } + rawRootfs := json.RawMessage(rootfs) + raw["rootfs"] = &rawRootfs + history, err := json.Marshal(convertedHistory) + if err != nil { + return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) + } + rawHistory := json.RawMessage(history) + raw["history"] = &rawHistory + // Encode the result. + config, err = json.Marshal(raw) + if err != nil { + return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s2, err) + } + return config, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { + image, err := m.ToSchema2(diffIDs) + if err != nil { + return "", err + } + return digest.FromBytes(image).Hex(), nil +} diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go new file mode 100644 index 00000000..ef82ffc2 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/docker_schema2.go @@ -0,0 +1,251 @@ +package manifest + +import ( + "encoding/json" + "time" + + "github.com/containers/image/pkg/strslice" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} + +// Schema2 is a manifest in docker/distribution schema 2. +type Schema2 struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + ConfigDescriptor Schema2Descriptor `json:"config"` + LayersDescriptors []Schema2Descriptor `json:"layers"` +} + +// Schema2Port is a Port, a string containing port number and protocol in the +// format "80/tcp", from docker/go-connections/nat. +// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from +// docker/go-connections/nat.
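The raw `map[string]*json.RawMessage` manipulation used by `ToSchema2` above is a reusable technique: fields that are never decoded into a schema survive the round trip untouched. A self-contained sketch of the same steps (the `editJSONObject` helper name is ours; the vendored code inlines the equivalent logic):

```go
package example

import "encoding/json"

// editJSONObject decodes a JSON object shallowly, drops some keys, adds or
// replaces others, and re-encodes it, leaving all other fields byte-exact
// apart from key ordering.
func editJSONObject(blob []byte, drop []string, add map[string]interface{}) ([]byte, error) {
	raw := map[string]*json.RawMessage{}
	if err := json.Unmarshal(blob, &raw); err != nil {
		return nil, err
	}
	for _, k := range drop {
		delete(raw, k)
	}
	for k, v := range add {
		enc, err := json.Marshal(v)
		if err != nil {
			return nil, err
		}
		m := json.RawMessage(enc)
		raw[k] = &m
	}
	return json.Marshal(raw)
}
```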
+type Schema2PortSet map[Schema2Port]struct{} + +// Schema2HealthConfig is a HealthConfig, which holds configuration settings +// for the HEALTHCHECK feature, from docker/docker/api/types/container. +type Schema2HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Schema2Config is a Config in docker/docker/api/types/container. +type Schema2Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// Schema2V1Image is a V1Image in docker/docker/image. 
+type Schema2V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig Schema2Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *Schema2Config `json:"config,omitempty"` + // Architecture is the hardware that the image is build and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image. +type Schema2RootFS struct { + Type string `json:"type"` + DiffIDs []digest.Digest `json:"diff_ids,omitempty"` +} + +// Schema2History stores build commands that were used to create an image, from docker/docker/image. +type Schema2History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Schema2Image is an Image in docker/docker/image. +type Schema2Image struct { + Schema2V1Image + Parent digest.Digest `json:"parent,omitempty"` + RootFS *Schema2RootFS `json:"rootfs,omitempty"` + History []Schema2History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID digest.Digest +} + +// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. +func Schema2FromManifest(manifest []byte) (*Schema2, error) { + s2 := Schema2{} + if err := json.Unmarshal(manifest, &s2); err != nil { + return nil, err + } + return &s2, nil +} + +// Schema2FromComponents creates an Schema2 manifest instance from the supplied data. 
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 { + return &Schema2{ + SchemaVersion: 2, + MediaType: DockerV2Schema2MediaType, + ConfigDescriptor: config, + LayersDescriptors: layers, + } +} + +// Schema2Clone creates a copy of the supplied Schema2 manifest. +func Schema2Clone(src *Schema2) *Schema2 { + copy := *src + return &copy +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *Schema2) ConfigInfo() types.BlobInfo { + return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *Schema2) LayerInfos() []types.BlobInfo { + blobs := []types.BlobInfo{} + for _, layer := range m.LayersDescriptors { + blobs = append(blobs, types.BlobInfo{ + Digest: layer.Digest, + Size: layer.Size, + URLs: layer.URLs, + }) + } + return blobs } + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.LayersDescriptors) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) + } + original := m.LayersDescriptors + m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) + for i, info := range layerInfos { + m.LayersDescriptors[i].MediaType = original[i].MediaType + m.LayersDescriptors[i].Digest = info.Digest + m.LayersDescriptors[i].Size = info.Size + m.LayersDescriptors[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema2) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + s2 := &Schema2Image{} + if err := json.Unmarshal(config, s2); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: "", + Created: s2.Created, + DockerVersion: s2.DockerVersion, + Architecture: s2.Architecture, + Os: s2.OS, + Layers: LayerInfosToStrings(m.LayerInfos()), + } + if s2.Config != nil { + i.Labels = s2.Config.Labels + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents.
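The `UpdateLayerInfos`/`Serialize` pairing shown for Schema1 and Schema2 (and, below, OCI1) is how callers rewrite a manifest after re-compressing or re-uploading blobs. A hedged sketch against the `Manifest` interface introduced in the next hunk; `replaceLayers` is our name, not a vendored function:

```go
package example

import (
	"github.com/containers/image/manifest"
	"github.com/containers/image/types"
)

// replaceLayers applies the UpdateLayerInfos contract: the caller supplies
// exactly one BlobInfo per existing layer, in order (root layer first);
// Schema2 and OCI1 keep each layer's original MediaType.
func replaceLayers(m manifest.Manifest, updated []types.BlobInfo) ([]byte, error) {
	if err := m.UpdateLayerInfos(updated); err != nil {
		return nil, err // e.g. the layer count changed
	}
	return m.Serialize()
}
```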
+func (m *Schema2) ImageID([]digest.Digest) (string, error) { + if err := m.ConfigDescriptor.Digest.Validate(); err != nil { + return "", err + } + return m.ConfigDescriptor.Digest.Hex(), nil +} diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go index e329ee57..2bc801d8 100644 --- a/vendor/github.com/containers/image/manifest/manifest.go +++ b/vendor/github.com/containers/image/manifest/manifest.go @@ -2,7 +2,9 @@ package manifest import ( "encoding/json" + "fmt" + "github.com/containers/image/types" "github.com/docker/libtrust" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -35,7 +37,40 @@ var DefaultRequestedManifestMIMETypes = []string{ DockerV2Schema2MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema1MediaType, - // DockerV2ListMediaType, // FIXME: Restore this ASAP + DockerV2ListMediaType, +} + +// Manifest is an interface for parsing, modifying image manifests in isolation. +// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members +// directly. +// +// See types.Image for functionality not limited to manifests, including format conversions and config parsing. +// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. +type Manifest interface { + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + ConfigInfo() types.BlobInfo + // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []types.BlobInfo + // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) + UpdateLayerInfos(layerInfos []types.BlobInfo) error + + // ImageID computes an ID which can uniquely identify this image by its contents, irrespective + // of which (of possibly more than one simultaneously valid) reference was used to locate the + // image, and unchanged by whether or how the layers are compressed. The result takes the form + // of the hexadecimal portion of a digest.Digest. + ImageID(diffIDs []digest.Digest) (string, error) + + // Inspect returns various information for (skopeo inspect) parsed from the manifest, + // incorporating information from a configuration blob returned by configGetter, if + // the underlying image format is expected to include a configuration blob. + Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) + + // Serialize returns the manifest in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! + Serialize() ([]byte, error) } // GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. 
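A sketch of format-agnostic use of this new interface, relying on `FromBlob` (added in the next hunk) to pick the right implementation from the MIME type; the `describe` helper is ours:

```go
package example

import (
	"fmt"

	"github.com/containers/image/manifest"
)

// describe parses a manifest blob of any supported single-image format and
// prints its config and layer digests via the shared Manifest interface.
func describe(blob []byte, mimeType string) error {
	m, err := manifest.FromBlob(blob, mimeType)
	if err != nil {
		return err // e.g. a manifest list, which FromBlob refuses
	}
	fmt.Println("config:", m.ConfigInfo().Digest)
	for _, l := range m.LayerInfos() {
		fmt.Println("layer:", l.Digest, l.Size)
	}
	return nil
}
```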
@@ -142,3 +177,62 @@ func AddDummyV2S1Signature(manifest []byte) ([]byte, error) { } return js.PrettySignature("signatures") } + +// MIMETypeIsMultiImage returns true if mimeType is a list of images +func MIMETypeIsMultiImage(mimeType string) bool { + return mimeType == DockerV2ListMediaType +} + +// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, +// centralizing various workarounds. +func NormalizedMIMEType(input string) string { + switch input { + // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . + // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might + // need to happen within the ImageSource. + case "application/json": + return DockerV2Schema1SignedMediaType + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, + imgspecv1.MediaTypeImageManifest, + DockerV2Schema2MediaType, + DockerV2ListMediaType: + return input + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return DockerV2Schema1SignedMediaType + } +} + +// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type +func FromBlob(manblob []byte, mt string) (Manifest, error) { + switch NormalizedMIMEType(mt) { + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: + return Schema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return OCI1FromManifest(manblob) + case DockerV2Schema2MediaType: + return Schema2FromManifest(manblob) + case DockerV2ListMediaType: + return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") + default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) + } +} + +// LayerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() +// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. +func LayerInfosToStrings(infos []types.BlobInfo) []string { + layers := make([]string, len(infos)) + for i, info := range infos { + layers[i] = info.Digest.String() + } + return layers +} diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go new file mode 100644 index 00000000..0ffb35b7 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/oci.go @@ -0,0 +1,120 @@ +package manifest + +import ( + "encoding/json" + "time" + + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// OCI1 is a manifest.Manifest implementation for OCI images. 
+// The underlying data from imgspecv1.Manifest is also available. +type OCI1 struct { + imgspecv1.Manifest +} + +// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. +func OCI1FromManifest(manifest []byte) (*OCI1, error) { + oci1 := OCI1{} + if err := json.Unmarshal(manifest, &oci1); err != nil { + return nil, err + } + return &oci1, nil +} + +// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. +func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { + return &OCI1{ + imgspecv1.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + Config: config, + Layers: layers, + }, + } +} + +// OCI1Clone creates a copy of the supplied OCI1 manifest. +func OCI1Clone(src *OCI1) *OCI1 { + return &OCI1{ + Manifest: src.Manifest, + } +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *OCI1) ConfigInfo() types.BlobInfo { + return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *OCI1) LayerInfos() []types.BlobInfo { + blobs := []types.BlobInfo{} + for _, layer := range m.Layers { + blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType}) + } + return blobs +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.Layers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) + } + original := m.Layers + m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) + for i, info := range layerInfos { + m.Layers[i].MediaType = original[i].MediaType + m.Layers[i].Digest = info.Digest + m.Layers[i].Size = info.Size + m.Layers[i].Annotations = info.Annotations + m.Layers[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *OCI1) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
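A minimal sketch of assembling and serializing an OCI manifest via `OCI1FromComponents`; the digests, sizes and the `buildOCI` name are placeholders for illustration, not values from this change:

```go
package example

import (
	"github.com/containers/image/manifest"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// buildOCI assembles a one-layer OCI manifest from descriptors and returns
// the serialized JSON blob.
func buildOCI(configDigest, layerDigest digest.Digest) ([]byte, error) {
	m := manifest.OCI1FromComponents(
		imgspecv1.Descriptor{
			MediaType: imgspecv1.MediaTypeImageConfig,
			Digest:    configDigest,
			Size:      1234, // placeholder sizes
		},
		[]imgspecv1.Descriptor{{
			MediaType: imgspecv1.MediaTypeImageLayerGzip,
			Digest:    layerDigest,
			Size:      56789,
		}},
	)
	return m.Serialize()
}
```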
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + v1 := &imgspecv1.Image{} + if err := json.Unmarshal(config, v1); err != nil { + return nil, err + } + d1 := &Schema2V1Image{} + json.Unmarshal(config, d1) + created := time.Time{} + if v1.Created != nil { + created = *v1.Created + } + i := &types.ImageInspectInfo{ + Tag: "", + Created: created, + DockerVersion: d1.DockerVersion, + Labels: v1.Config.Labels, + Architecture: v1.Architecture, + Os: v1.OS, + Layers: LayerInfosToStrings(m.LayerInfos()), + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *OCI1) ImageID([]digest.Digest) (string, error) { + if err := m.Config.Digest.Validate(); err != nil { + return "", err + } + return m.Config.Digest.Hex(), nil +} diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go index 8644202f..aee5d8d5 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/oci/archive/oci_src.go @@ -68,14 +68,12 @@ func (s *ociArchiveImageSource) Close() error { return s.unpackedSrc.Close() } -// GetManifest returns the image's manifest along with its MIME type -// (which may be empty when it can't be determined but the manifest is available). -func (s *ociArchiveImageSource) GetManifest() ([]byte, string, error) { - return s.unpackedSrc.GetManifest() -} - -func (s *ociArchiveImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return s.unpackedSrc.GetTargetManifest(digest) +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + return s.unpackedSrc.GetManifest(instanceDigest) } // GetBlob returns a stream for the specified blob, and the blob's size. @@ -83,6 +81,15 @@ func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int return s.unpackedSrc.GetBlob(info) } -func (s *ociArchiveImageSource) GetSignatures(c context.Context) ([][]byte, error) { - return s.unpackedSrc.GetSignatures(c) +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return s.unpackedSrc.GetSignatures(ctx, instanceDigest) +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
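From the caller's side, the `instanceDigest` plumbing added throughout this change composes with `UnparsedInstance` (earlier in this diff): nil selects the primary manifest, while a non-nil digest pins one entry of a manifest list. A sketch, with `manifestFor` being our name:

```go
package example

import (
	"github.com/containers/image/image"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
)

// manifestFor returns the (cached, digest-verified) manifest blob and MIME
// type for either the primary manifest (instance == nil) or one pinned
// instance of a manifest list.
func manifestFor(src types.ImageSource, instance *digest.Digest) ([]byte, string, error) {
	return image.UnparsedInstance(src, instance).Manifest()
}
```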
+func (s *ociArchiveImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil } diff --git a/vendor/github.com/containers/image/oci/archive/oci_transport.go b/vendor/github.com/containers/image/oci/archive/oci_transport.go index 31b19198..c4a4fa71 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/oci/archive/oci_transport.go @@ -4,13 +4,13 @@ import ( "fmt" "io/ioutil" "os" - "path/filepath" - "regexp" "strings" "github.com/containers/image/directory/explicitfilepath" "github.com/containers/image/docker/reference" "github.com/containers/image/image" + "github.com/containers/image/internal/tmpdir" + "github.com/containers/image/oci/internal" ocilayout "github.com/containers/image/oci/layout" "github.com/containers/image/transports" "github.com/containers/image/types" @@ -48,51 +48,12 @@ func (t ociArchiveTransport) ParseReference(reference string) (types.ImageRefere // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error { - var file string - sep := strings.SplitN(scope, ":", 2) - file = sep[0] - - if len(sep) == 2 { - image := sep[1] - if !refRegexp.MatchString(image) { - return errors.Errorf("Invalid image %s", image) - } - } - - if !strings.HasPrefix(file, "/") { - return errors.Errorf("Invalid scope %s: must be an absolute path", scope) - } - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - // (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?) - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - cleaned := filepath.Clean(file) - if cleaned != file { - return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - return nil + return internal.ValidateScope(scope) } -// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys -const ( - separator = `(?:[-._:@+]|--)` - alphanum = `(?:[A-Za-z0-9]+)` - component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` -) - -var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) - // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. func ParseReference(reference string) (types.ImageReference, error) { - var file, image string - sep := strings.SplitN(reference, ":", 2) - file = sep[0] - - if len(sep) == 2 { - image = sep[1] - } + file, image := internal.SplitPathAndImage(reference) return NewReference(file, image) } @@ -102,14 +63,15 @@ func NewReference(file, image string) (types.ImageReference, error) { if err != nil { return nil, err } - // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces - // from being ambiguous with values of PolicyConfigurationIdentity. 
- if strings.Contains(resolved, ":") { - return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", file, image, resolved) + + if err := internal.ValidateOCIPath(file); err != nil { + return nil, err } - if len(image) > 0 && !refRegexp.MatchString(image) { - return nil, errors.Errorf("Invalid image %s", image) + + if err := internal.ValidateImageName(image); err != nil { + return nil, err } + return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil } @@ -154,14 +116,17 @@ func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string { return res } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. -func (ref ociArchiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref ociArchiveReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(ctx, ref) if err != nil { return nil, err } - return image.FromSource(src) + return image.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. @@ -194,7 +159,7 @@ func (t *tempDirOCIRef) deleteTempDir() error { // createOCIRef creates the oci reference of the image func createOCIRef(image string) (tempDirOCIRef, error) { - dir, err := ioutil.TempDir("/var/tmp", "oci") + dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci") if err != nil { return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory") } diff --git a/vendor/github.com/containers/image/oci/internal/oci_util.go b/vendor/github.com/containers/image/oci/internal/oci_util.go new file mode 100644 index 00000000..c2012e50 --- /dev/null +++ b/vendor/github.com/containers/image/oci/internal/oci_util.go @@ -0,0 +1,126 @@ +package internal + +import ( + "github.com/pkg/errors" + "path/filepath" + "regexp" + "runtime" + "strings" +) + +// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys +const ( + separator = `(?:[-._:@+]|--)` + alphanum = `(?:[A-Za-z0-9]+)` + component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` +) + +var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) +var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`) + +// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs. +// In any other case an error is returned. +func ValidateImageName(image string) error { + if len(image) == 0 { + return nil + } + + var err error + if !refRegexp.MatchString(image) { + err = errors.Errorf("Invalid image %s", image) + } + return err +} + +// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image. +// Neither path nor image parts are validated at this stage. 
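The function that follows behaves roughly like this sketch; note that, per Go's internal-package rule, `oci/internal` is only importable from within containers/image itself, so the `split` wrapper and sample paths below are purely illustrative:

```go
package example

import "github.com/containers/image/oci/internal"

// split mirrors the reference syntax accepted by the oci: and oci-archive:
// transports (wrapper name ours):
//
//	split("/var/lib/oci:fedora") -> ("/var/lib/oci", "fedora")
//	split("/var/lib/oci")        -> ("/var/lib/oci", "")
//	split(`C:\oci\dir:fedora`)   -> (`C:\oci\dir`, "fedora") // Windows: the drive colon stays with the path
func split(ref string) (path, image string) {
	return internal.SplitPathAndImage(ref)
}
```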
+func SplitPathAndImage(reference string) (string, string) { + if runtime.GOOS == "windows" { + return splitPathAndImageWindows(reference) + } + return splitPathAndImageNonWindows(reference) +} + +func splitPathAndImageWindows(reference string) (string, string) { + groups := windowsRefRegexp.FindStringSubmatch(reference) + // nil group means no match + if groups == nil { + return reference, "" + } + + // we expect three elements. First one full match, second the capture group for the path and + // the third the capture group for the image + if len(groups) != 3 { + return reference, "" + } + return groups[1], groups[2] +} + +func splitPathAndImageNonWindows(reference string) (string, string) { + sep := strings.SplitN(reference, ":", 2) + path := sep[0] + + var image string + if len(sep) == 2 { + image = sep[1] + } + return path, image +} + +// ValidateOCIPath takes the OCI path and validates it. +func ValidateOCIPath(path string) error { + if runtime.GOOS == "windows" { + // On Windows we must allow for a ':' as part of the path + if strings.Count(path, ":") > 1 { + return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path) + } + } else { + if strings.Contains(path, ":") { + return errors.Errorf("Invalid OCI reference: path %s contains a colon", path) + } + } + return nil +} + +// ValidateScope validates a policy configuration scope for an OCI transport. +func ValidateScope(scope string) error { + var err error + if runtime.GOOS == "windows" { + err = validateScopeWindows(scope) + } else { + err = validateScopeNonWindows(scope) + } + if err != nil { + return err + } + + cleaned := filepath.Clean(scope) + if cleaned != scope { + return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + return nil +} + +func validateScopeWindows(scope string) error { + matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) + if !matched { + return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) + } + + return nil +} + +func validateScopeNonWindows(scope string) error { + if !strings.HasPrefix(scope, "/") { + return errors.Errorf("Invalid scope %s: must be an absolute path", scope) + } + + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + + return nil +} diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go index ce1e0c3e..e95f6516 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go @@ -18,21 +18,47 @@ import ( ) type ociImageDestination struct { - ref ociReference - index imgspecv1.Index + ref ociReference + index imgspecv1.Index + sharedBlobDir string } // newImageDestination returns an ImageDestination for writing to an existing directory. 
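The destination code that follows loads any pre-existing index.json, and its addManifest helper (further below) replaces the entry whose org.opencontainers.image.ref.name annotation matches rather than appending a duplicate. A sketch of that rule in isolation (`addOrReplace` is our name):

```go
package example

import imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"

// addOrReplace keys OCI layout index entries by their ref.name annotation:
// writing the same tag again replaces the old descriptor in place.
func addOrReplace(index *imgspecv1.Index, desc imgspecv1.Descriptor) {
	const refName = "org.opencontainers.image.ref.name"
	for i, m := range index.Manifests {
		if m.Annotations[refName] == desc.Annotations[refName] {
			index.Manifests[i] = desc // replace an existing tag in place
			return
		}
	}
	index.Manifests = append(index.Manifests, desc) // new tag: append
}
```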
-func newImageDestination(ref ociReference) (types.ImageDestination, error) { +func newImageDestination(ctx *types.SystemContext, ref ociReference) (types.ImageDestination, error) { if ref.image == "" { return nil, errors.Errorf("cannot save image with empty image.ref.name") } - index := imgspecv1.Index{ - Versioned: imgspec.Versioned{ - SchemaVersion: 2, - }, + + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + } } - return &ociImageDestination{ref: ref, index: index}, nil + + d := &ociImageDestination{ref: ref, index: *index} + if ctx != nil { + d.sharedBlobDir = ctx.OCISharedBlobDirPath + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + return nil, err + } + return d, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, @@ -81,16 +107,16 @@ func (d *ociImageDestination) MustMatchRuntimeOS() bool { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - if err := ensureDirectoryExists(d.ref.dir); err != nil { - return types.BlobInfo{}, err - } blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") if err != nil { return types.BlobInfo{}, err } succeeded := false + explicitClosed := false defer func() { - blobFile.Close() + if !explicitClosed { + blobFile.Close() + } if !succeeded { os.Remove(blobFile.Name()) } @@ -110,17 +136,28 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err } - if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err + + // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. + // On Windows, the “permissions of newly created files” argument to syscall.Open is + // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, + // always fails on Windows. 
+ if runtime.GOOS != "windows" { + if err := blobFile.Chmod(0644); err != nil { + return types.BlobInfo{}, err + } } - blobPath, err := d.ref.blobPath(computedDigest) + blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir) if err != nil { return types.BlobInfo{}, err } if err := ensureParentDirectoryExists(blobPath); err != nil { return types.BlobInfo{}, err } + + // need to explicitly close the file, since a rename won't otherwise not work on Windows + blobFile.Close() + explicitClosed = true if err := os.Rename(blobFile.Name(), blobPath); err != nil { return types.BlobInfo{}, err } @@ -136,7 +173,7 @@ func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) if info.Digest == "" { return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`) } - blobPath, err := d.ref.blobPath(info.Digest) + blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) if err != nil { return false, -1, err } @@ -169,7 +206,7 @@ func (d *ociImageDestination) PutManifest(m []byte) error { desc.MediaType = imgspecv1.MediaTypeImageManifest desc.Size = int64(len(m)) - blobPath, err := d.ref.blobPath(digest) + blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) if err != nil { return err } @@ -191,23 +228,20 @@ func (d *ociImageDestination) PutManifest(m []byte) error { Architecture: runtime.GOARCH, OS: runtime.GOOS, } - d.index.Manifests = append(d.index.Manifests, desc) + d.addManifest(&desc) return nil } -func ensureDirectoryExists(path string) error { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - if err := os.MkdirAll(path, 0755); err != nil { - return err +func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { + for i, manifest := range d.index.Manifests { + if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] { + // TODO Should there first be a cleanup based on the descriptor we are going to replace? + d.index.Manifests[i] = *desc + return } } - return nil -} - -// ensureParentDirectoryExists ensures the parent of the supplied path exists. -func ensureParentDirectoryExists(path string) error { - return ensureDirectoryExists(filepath.Dir(path)) + d.index.Manifests = append(d.index.Manifests, *desc) } func (d *ociImageDestination) PutSignatures(signatures [][]byte) error { @@ -231,3 +265,30 @@ func (d *ociImageDestination) Commit() error { } return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644) } + +func ensureDirectoryExists(path string) error { + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } + return nil +} + +// ensureParentDirectoryExists ensures the parent of the supplied path exists. +func ensureParentDirectoryExists(path string) error { + return ensureDirectoryExists(filepath.Dir(path)) +} + +// indexExists checks whether the index location specified in the OCI reference exists. 
+// The implementation is opinionated: in case of unexpected errors the index is assumed to exist (true is returned) +func indexExists(ref ociReference) bool { + _, err := os.Stat(ref.indexPath()) + if err == nil { + return true + } + if os.IsNotExist(err) { + return false + } + return true +} diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go index be8a2aa7..1109f65c 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/oci/layout/oci_src.go @@ -17,9 +17,10 @@ import ( ) type ociImageSource struct { - ref ociReference - descriptor imgspecv1.Descriptor - client *http.Client + ref ociReference + descriptor imgspecv1.Descriptor + client *http.Client + sharedBlobDir string } // newImageSource returns an ImageSource for reading from an existing directory. @@ -40,7 +41,12 @@ func newImageSource(ctx *types.SystemContext, ref ociReference) (types.ImageSour if err != nil { return nil, err } - return &ociImageSource{ref: ref, descriptor: descriptor, client: client}, nil + d := &ociImageSource{ref: ref, descriptor: descriptor, client: client} + if ctx != nil { + // TODO(jonboulle): check dir existence? + d.sharedBlobDir = ctx.OCISharedBlobDirPath + } + return d, nil } // Reference returns the reference used to set up this source. @@ -55,8 +61,26 @@ func (s *ociImageSource) Close() error { // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *ociImageSource) GetManifest() ([]byte, string, error) { - manifestPath, err := s.ref.blobPath(digest.Digest(s.descriptor.Digest)) +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + var dig digest.Digest + var mimeType string + if instanceDigest == nil { + dig = digest.Digest(s.descriptor.Digest) + mimeType = s.descriptor.MediaType + } else { + dig = *instanceDigest + // XXX: instanceDigest means that we don't immediately have the context of what + // mediaType the manifest has. In OCI this means that we don't know + // what reference it came from, so we just *assume* that it's + // MediaTypeImageManifest. + // FIXME: We should actually be able to look up the manifest in the index, + // and see the MIME type there. + mimeType = imgspecv1.MediaTypeImageManifest + } + + manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) if err != nil { return nil, "", err } @@ -65,25 +89,7 @@ func (s *ociImageSource) GetManifest() ([]byte, string, error) { return nil, "", err } - return m, s.descriptor.MediaType, nil -} - -func (s *ociImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - manifestPath, err := s.ref.blobPath(digest) - if err != nil { - return nil, "", err - } - - m, err := ioutil.ReadFile(manifestPath) - if err != nil { - return nil, "", err - } - - // XXX: GetTargetManifest means that we don't have the context of what - // mediaType the manifest has. In OCI this means that we don't know - // what reference it came from, so we just *assume* that its - // MediaTypeImageManifest.
- return m, imgspecv1.MediaTypeImageManifest, nil + return m, mimeType, nil } // GetBlob returns a stream for the specified blob, and the blob's size. @@ -92,7 +98,7 @@ func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err return s.getExternalBlob(info.URLs) } - path, err := s.ref.blobPath(info.Digest) + path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) if err != nil { return nil, 0, err } @@ -108,7 +114,11 @@ func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err return r, fi.Size(), nil } -func (s *ociImageSource) GetSignatures(context.Context) ([][]byte, error) { +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { return [][]byte{}, nil } @@ -133,6 +143,11 @@ func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, e return nil, 0, errWrap } +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *ociImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} + func getBlobSize(resp *http.Response) int64 { size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) if err != nil { diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport.go b/vendor/github.com/containers/image/oci/layout/oci_transport.go index 312bc0e4..c181c4c7 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/oci/layout/oci_transport.go @@ -5,12 +5,12 @@ import ( "fmt" "os" "path/filepath" - "regexp" "strings" "github.com/containers/image/directory/explicitfilepath" "github.com/containers/image/docker/reference" "github.com/containers/image/image" + "github.com/containers/image/oci/internal" "github.com/containers/image/transports" "github.com/containers/image/types" "github.com/opencontainers/go-digest" @@ -36,45 +36,12 @@ func (t ociTransport) ParseReference(reference string) (types.ImageReference, er return ParseReference(reference) } -// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys -const ( - separator = `(?:[-._:@+]|--)` - alphanum = `(?:[A-Za-z0-9]+)` - component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` -) - -var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) - // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. 
func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { - var dir string - sep := strings.SplitN(scope, ":", 2) - dir = sep[0] - - if len(sep) == 2 { - image := sep[1] - if !refRegexp.MatchString(image) { - return errors.Errorf("Invalid image %s", image) - } - } - - if !strings.HasPrefix(dir, "/") { - return errors.Errorf("Invalid scope %s: must be an absolute path", scope) - } - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - // (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?) - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - cleaned := filepath.Clean(dir) - if cleaned != dir { - return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - return nil + return internal.ValidateScope(scope) } // ociReference is an ImageReference for OCI directory paths. @@ -92,13 +59,7 @@ type ociReference struct { // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. func ParseReference(reference string) (types.ImageReference, error) { - var dir, image string - sep := strings.SplitN(reference, ":", 2) - dir = sep[0] - - if len(sep) == 2 { - image = sep[1] - } + dir, image := internal.SplitPathAndImage(reference) return NewReference(dir, image) } @@ -111,14 +72,15 @@ func NewReference(dir, image string) (types.ImageReference, error) { if err != nil { return nil, err } - // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces - // from being ambiguous with values of PolicyConfigurationIdentity. - if strings.Contains(resolved, ":") { - return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, image, resolved) + + if err := internal.ValidateOCIPath(dir); err != nil { + return nil, err } - if len(image) > 0 && !refRegexp.MatchString(image) { - return nil, errors.Errorf("Invalid image %s", image) + + if err = internal.ValidateImageName(image); err != nil { + return nil, err } + return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil } @@ -177,28 +139,40 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string { return res } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref ociReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(ctx, ref) if err != nil { return nil, err } - return image.FromSource(src) + return image.FromSource(ctx, src) +} + +// getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together +// with an error. 
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) { + indexJSON, err := os.Open(ref.indexPath()) + if err != nil { + return nil, err + } + defer indexJSON.Close() + + index := &imgspecv1.Index{} + if err := json.NewDecoder(indexJSON).Decode(index); err != nil { + return nil, err + } + return index, nil } func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { - indexJSON, err := os.Open(ref.indexPath()) + index, err := ref.getIndex() if err != nil { return imgspecv1.Descriptor{}, err } - defer indexJSON.Close() - index := imgspecv1.Index{} - if err := json.NewDecoder(indexJSON).Decode(&index); err != nil { - return imgspecv1.Descriptor{}, err - } var d *imgspecv1.Descriptor if ref.image == "" { @@ -250,7 +224,7 @@ func (ref ociReference) NewImageSource(ctx *types.SystemContext) (types.ImageSou // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ref) + return newImageDestination(ctx, ref) } // DeleteImage deletes the named image from the registry, if supported. @@ -269,9 +243,13 @@ func (ref ociReference) indexPath() string { } // blobPath returns a path for a blob within a directory using OCI image-layout conventions. -func (ref ociReference) blobPath(digest digest.Digest) (string, error) { +func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { if err := digest.Validate(); err != nil { return "", errors.Wrapf(err, "unexpected digest reference %s", digest) } - return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil + blobDir := filepath.Join(ref.dir, "blobs") + if sharedBlobDir != "" { + blobDir = sharedBlobDir + } + return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil } diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go index 0117f2e0..54655914 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -200,20 +200,15 @@ func (s *openshiftImageSource) Close() error { return nil } -func (s *openshiftImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - if err := s.ensureImageIsResolved(context.TODO()); err != nil { - return nil, "", err - } - return s.docker.GetTargetManifest(digest) -} - // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *openshiftImageSource) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { if err := s.ensureImageIsResolved(context.TODO()); err != nil { return nil, "", err } - return s.docker.GetManifest() + return s.docker.GetManifest(instanceDigest) } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
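The `getIndex` helper introduced above is just a JSON decode of the layout's `index.json`; `getManifestDescriptor` then scans the decoded descriptors for one whose `org.opencontainers.image.ref.name` annotation matches the requested image. A standalone sketch of the same steps (the layout path and tag are made up for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// readIndex decodes index.json from an OCI layout directory, as getIndex does.
func readIndex(dir string) (*imgspecv1.Index, error) {
	f, err := os.Open(filepath.Join(dir, "index.json"))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	index := &imgspecv1.Index{}
	if err := json.NewDecoder(f).Decode(index); err != nil {
		return nil, err
	}
	return index, nil
}

func main() {
	index, err := readIndex("/tmp/oci-layout") // hypothetical layout path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Select a manifest by its ref.name annotation, as getManifestDescriptor
	// does when the reference names an image.
	for _, d := range index.Manifests {
		if d.Annotations["org.opencontainers.image.ref.name"] == "latest" {
			fmt.Printf("found manifest %s (%s)\n", d.Digest, d.MediaType)
		}
	}
}
```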
@@ -224,12 +219,21 @@ func (s *openshiftImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int6 return s.docker.GetBlob(info) } -func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, err +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + var imageName string + if instanceDigest == nil { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, err + } + imageName = s.imageStreamImageName + } else { + imageName = instanceDigest.String() } - - image, err := s.client.getImage(ctx, s.imageStreamImageName) + image, err := s.client.getImage(ctx, imageName) if err != nil { return nil, err } @@ -242,6 +246,11 @@ func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, err return sigs, nil } +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *openshiftImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} + // ensureImageIsResolved sets up s.docker and s.imageStreamImageName func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { if s.docker != nil { diff --git a/vendor/github.com/containers/image/openshift/openshift_transport.go b/vendor/github.com/containers/image/openshift/openshift_transport.go index 7db35d96..686d806f 100644 --- a/vendor/github.com/containers/image/openshift/openshift_transport.go +++ b/vendor/github.com/containers/image/openshift/openshift_transport.go @@ -125,16 +125,17 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string { return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference) } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(ctx, ref) if err != nil { return nil, err } - return genericImage.FromSource(src) + return genericImage.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. 
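Both the oci and openshift transports now return a `types.ImageCloser` from `NewImage` and route through `image.FromSource(ctx, src)`, leaving the caller responsible for `Close()`. A sketch of the consumer side, assuming the vendored containers/image API of this diff (later versions of the library change these signatures):

```go
package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
	"github.com/containers/image/types"
)

// inspectManifest resolves an image reference string, opens it per the updated
// NewImage contract, and reads its manifest.
func inspectManifest(refString string, ctx *types.SystemContext) error {
	ref, err := alltransports.ParseImageName(refString)
	if err != nil {
		return err
	}
	img, err := ref.NewImage(ctx)
	if err != nil {
		return err
	}
	// NewImage now returns a types.ImageCloser; the caller owns Close().
	defer img.Close()

	manifestBlob, mimeType, err := img.Manifest()
	if err != nil {
		return err
	}
	fmt.Printf("%s: %d-byte manifest (%s)\n", refString, len(manifestBlob), mimeType)
	return nil
}

func main() {
	if err := inspectManifest("docker://docker.io/library/busybox:latest", nil); err != nil {
		fmt.Println(err)
	}
}
```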
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
index 26137431..704e1ece 100644
--- a/vendor/github.com/containers/image/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -4,6 +4,8 @@ package ostree
 
 import (
 	"bytes"
+	"compress/gzip"
+	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -12,18 +14,27 @@ import (
 	"os/exec"
 	"path/filepath"
 	"strconv"
-	"strings"
 	"time"
 
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
-
 	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
+
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
 )
 
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+import "C"
+
 type blobToImport struct {
 	Size   int64
 	Digest digest.Digest
@@ -35,18 +46,24 @@ type descriptor struct {
 	Digest digest.Digest `json:"digest"`
 }
 
+type fsLayersSchema1 struct {
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
 type manifestSchema struct {
-	ConfigDescriptor  descriptor   `json:"config"`
-	LayersDescriptors []descriptor `json:"layers"`
+	LayersDescriptors []descriptor      `json:"layers"`
+	FSLayers          []fsLayersSchema1 `json:"fsLayers"`
 }
 
 type ostreeImageDestination struct {
-	ref        ostreeReference
-	manifest   string
-	schema     manifestSchema
-	tmpDirPath string
-	blobs      map[string]*blobToImport
-	digest     digest.Digest
+	ref           ostreeReference
+	manifest      string
+	schema        manifestSchema
+	tmpDirPath    string
+	blobs         map[string]*blobToImport
+	digest        digest.Digest
+	signaturesLen int
+	repo          *C.struct_OstreeRepo
 }
 
 // newImageDestination returns an ImageDestination for writing to an existing ostree.
@@ -55,7 +72,7 @@ func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDes
 	if err := ensureDirectoryExists(tmpDirPath); err != nil {
 		return nil, err
 	}
-	return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, ""}, nil
+	return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
 }
 
 // Reference returns the reference used to set up this destination.  Note that this should directly correspond to user's intent,
@@ -66,6 +83,9 @@ func (d *ostreeImageDestination) Reference() types.ImageReference {
 
 // Close removes resources associated with an initialized ImageDestination, if any.
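The destination now cares only about layer digests, so `manifestSchema` above keeps just the schema2 `layers` and schema1 `fsLayers` arrays; `Commit()` later walks both. A sketch of decoding a manifest with that trimmed structure (the sample JSON digest is made up):

```go
package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// Mirror of the trimmed manifestSchema above: only layer digests matter here.
type descriptor struct {
	Digest digest.Digest `json:"digest"`
}

type fsLayersSchema1 struct {
	BlobSum digest.Digest `json:"blobSum"`
}

type manifestSchema struct {
	LayersDescriptors []descriptor      `json:"layers"`
	FSLayers          []fsLayersSchema1 `json:"fsLayers"`
}

func main() {
	// A schema2-style manifest populates "layers"; a schema1 manifest would
	// populate "fsLayers" instead.
	blob := []byte(`{"layers": [{"digest": "sha256:0a1b2c"}]}`)

	var m manifestSchema
	if err := json.Unmarshal(blob, &m); err != nil {
		panic(err)
	}
	for _, l := range m.LayersDescriptors {
		fmt.Println("schema2 layer:", l.Digest.Hex())
	}
	for _, l := range m.FSLayers {
		fmt.Println("schema1 layer:", l.BlobSum.Hex())
	}
}
```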
func (d *ostreeImageDestination) Close() error { + if d.repo != nil { + C.g_object_unref(C.gpointer(d.repo)) + } return os.RemoveAll(d.tmpDirPath) } @@ -174,6 +194,35 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin return err } +func generateTarSplitMetadata(output *bytes.Buffer, file string) error { + mfz := gzip.NewWriter(output) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + stream, err := os.OpenFile(file, os.O_RDONLY, 0) + if err != nil { + return err + } + defer stream.Close() + + gzReader, err := gzip.NewReader(stream) + if err != nil { + return err + } + defer gzReader.Close() + + its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) + if err != nil { + return err + } + + _, err = io.Copy(ioutil.Discard, its) + if err != nil { + return err + } + return nil +} + func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToImport) error { ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root") @@ -185,6 +234,11 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm os.RemoveAll(destinationPath) }() + var tarSplitOutput bytes.Buffer + if err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath); err != nil { + return err + } + if os.Getuid() == 0 { if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil { return err @@ -202,28 +256,35 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm return err } } + return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size), + fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))}) + +} + +func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error { + ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) + destinationPath := filepath.Dir(blob.BlobPath) + return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) } -func (d *ostreeImageDestination) importConfig(blob *blobToImport) error { - ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) - - return exec.Command("ostree", "commit", - "--repo", d.ref.repo, - fmt.Sprintf("--add-metadata-string=docker.size=%d", blob.Size), - "--branch", ostreeBranch, filepath.Dir(blob.BlobPath)).Run() -} - func (d *ostreeImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) - output, err := exec.Command("ostree", "show", "--repo", d.ref.repo, "--print-metadata-key=docker.size", branch).CombinedOutput() - if err != nil { - if bytes.Index(output, []byte("not found")) >= 0 || bytes.Index(output, []byte("No such")) >= 0 { - return false, -1, nil + + if d.repo == nil { + repo, err := openRepo(d.ref.repo) + if err != nil { + return false, 0, err } - return false, -1, err + d.repo = repo } - size, err := strconv.ParseInt(strings.Trim(string(output), "'\n"), 10, 64) + branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) + + found, data, err := readMetadata(d.repo, branch, "docker.size") + if err != nil || !found { + return found, -1, err + } + + size, err := strconv.ParseInt(data, 10, 64) if err != nil { return false, -1, err } @@ -272,6 +333,7 @@ func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error { return err } } + d.signaturesLen = len(signatures) return nil } @@ -286,24 +348,37 @@ func (d 
*ostreeImageDestination) Commit() error {
 		return err
 	}
 
-	for _, layer := range d.schema.LayersDescriptors {
-		hash := layer.Digest.Hex()
+	checkLayer := func(hash string) error {
 		blob := d.blobs[hash]
 		// if the blob is not present in d.blobs then it is already stored in OSTree,
 		// and we don't need to import it.
 		if blob == nil {
-			continue
+			return nil
 		}
 		err := d.importBlob(repo, blob)
 		if err != nil {
 			return err
 		}
+
+		delete(d.blobs, hash)
+		return nil
+	}
+	for _, layer := range d.schema.LayersDescriptors {
+		hash := layer.Digest.Hex()
+		if err = checkLayer(hash); err != nil {
+			return err
+		}
+	}
+	for _, layer := range d.schema.FSLayers {
+		hash := layer.BlobSum.Hex()
+		if err = checkLayer(hash); err != nil {
+			return err
+		}
 	}
 
-	hash := d.schema.ConfigDescriptor.Digest.Hex()
-	blob := d.blobs[hash]
-	if blob != nil {
-		err := d.importConfig(blob)
+	// Import the other blobs that are not layers
+	for _, blob := range d.blobs {
+		err := d.importConfig(repo, blob)
 		if err != nil {
 			return err
 		}
@@ -311,7 +386,9 @@ func (d *ostreeImageDestination) Commit() error {
 
 	manifestPath := filepath.Join(d.tmpDirPath, "manifest")
 
-	metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), fmt.Sprintf("docker.digest=%s", string(d.digest))}
+	metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
+		fmt.Sprintf("signatures=%d", d.signaturesLen),
+		fmt.Sprintf("docker.digest=%s", string(d.digest))}
 
 	err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata)
 
 	_, err = repo.CommitTransaction()
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
new file mode 100644
index 00000000..c65a07b7
--- /dev/null
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -0,0 +1,354 @@
+// +build !containers_image_ostree_stub
+
+package ostree
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strconv"
+	"strings"
+	"unsafe"
+
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/types"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/opencontainers/go-digest"
+	glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+import "C"
+
+type ostreeImageSource struct {
+	ref    ostreeReference
+	tmpDir string
+	repo   *C.struct_OstreeRepo
+}
+
+// newImageSource returns an ImageSource for reading from an existing directory.
+func newImageSource(ctx *types.SystemContext, tmpDir string, ref ostreeReference) (types.ImageSource, error) {
+	return &ostreeImageSource{ref: ref, tmpDir: tmpDir}, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ostreeImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
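The new image source reads everything it needs back out of commit metadata: the helpers below (`getLayerSize`, `getLenSignatures`, `getTarSplitData`) all resolve an `ociimage/…` branch and look up one key via `readMetadata`. The key names come from this diff; the one-line descriptions are my own summary:

```go
package main

import "fmt"

// Commit-metadata keys the ostree transport stores on "ociimage/…" branches.
var commitMetadataKeys = map[string]string{
	"docker.size":     "uncompressed blob size as a decimal string",
	"docker.manifest": "raw manifest contents (manifest branch only)",
	"docker.digest":   "manifest digest (manifest branch only)",
	"signatures":      "number of signature files stored alongside the manifest",
	"tarsplit.output": "base64-encoded, gzip'd tar-split metadata for the layer",
}

func main() {
	for key, meaning := range commitMetadataKeys {
		fmt.Printf("%-16s %s\n", key, meaning)
	}
}
```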
+func (s *ostreeImageSource) Close() error { + if s.repo != nil { + C.g_object_unref(C.gpointer(s.repo)) + } + return nil +} + +func (s *ostreeImageSource) getLayerSize(blob string) (int64, error) { + b := fmt.Sprintf("ociimage/%s", blob) + found, data, err := readMetadata(s.repo, b, "docker.size") + if err != nil || !found { + return 0, err + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getLenSignatures() (int64, error) { + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, data, err := readMetadata(s.repo, b, "signatures") + if err != nil { + return -1, err + } + if !found { + // if 'signatures' is not present, just return 0 signatures. + return 0, nil + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) { + b := fmt.Sprintf("ociimage/%s", blob) + found, out, err := readMetadata(s.repo, b, "tarsplit.output") + if err != nil || !found { + return nil, err + } + return base64.StdEncoding.DecodeString(out) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +func (s *ostreeImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`) + } + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, "", err + } + s.repo = repo + } + + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, out, err := readMetadata(s.repo, b, "docker.manifest") + if err != nil { + return nil, "", err + } + if !found { + return nil, "", errors.New("manifest not found") + } + m := []byte(out) + return m, manifest.GuessMIMEType(m), nil +} + +func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + return nil, "", errors.New("manifest lists are not supported by this transport") +} + +func openRepo(path string) (*C.struct_OstreeRepo, error) { + var cerr *C.GError + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + repo := C.ostree_repo_new(pathc) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) + if !r { + C.g_object_unref(C.gpointer(repo)) + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + return repo, nil +} + +type ostreePathFileGetter struct { + repo *C.struct_OstreeRepo + parentRoot *C.GFile +} + +type ostreeReader struct { + stream *C.GFileInputStream +} + +func (o ostreeReader) Close() error { + C.g_object_unref(C.gpointer(o.stream)) + return nil +} +func (o ostreeReader) Read(p []byte) (int, error) { + var cerr *C.GError + instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type()) + stream := (*C.GInputStream)(unsafe.Pointer(instanceCast)) + + b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr) + if b == nil { + return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_bytes_unref(b) + + count := int(C.g_bytes_get_size(b)) + if count == 0 { + return 0, io.EOF + } + data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count] + copy(p, data) + return count, nil +} + +func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) { + var cerr *C.GError + var ref *C.char + 
defer C.free(unsafe.Pointer(ref)) + + cCommit := C.CString(commit) + defer C.free(unsafe.Pointer(cCommit)) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) { + return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + if ref == nil { + return false, "", nil + } + + var variant *C.GVariant + if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) { + return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_variant_unref(variant) + if variant != nil { + cKey := C.CString(key) + defer C.free(unsafe.Pointer(cKey)) + + metadata := C.g_variant_get_child_value(variant, 0) + defer C.g_variant_unref(metadata) + + data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil) + if data != nil { + defer C.g_variant_unref(data) + ptr := (*C.char)(C.g_variant_get_string(data, nil)) + val := C.GoString(ptr) + return true, val, nil + } + } + return false, "", nil +} + +func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) { + var cerr *C.GError + var parentRoot *C.GFile + cCommit := C.CString(commit) + defer C.free(unsafe.Pointer(cCommit)) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) { + return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + C.g_object_ref(C.gpointer(repo)) + + return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil +} + +func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) { + var file *C.GFile + if strings.HasPrefix(filename, "./") { + filename = filename[2:] + } + cfilename := C.CString(filename) + defer C.free(unsafe.Pointer(cfilename)) + + file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename)) + + var cerr *C.GError + stream := C.g_file_read(file, nil, &cerr) + if stream == nil { + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + return &ostreeReader{stream: stream}, nil +} + +func (o ostreePathFileGetter) Close() { + C.g_object_unref(C.gpointer(o.repo)) + C.g_object_unref(C.gpointer(o.parentRoot)) +} + +func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) { + getter, err := newOSTreePathFileGetter(s.repo, commit) + if err != nil { + return nil, err + } + defer getter.Close() + + return getter.Get(path) +} + +// GetBlob returns a stream for the specified blob, and the blob's size. +func (s *ostreeImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { + blob := info.Digest.Hex() + branch := fmt.Sprintf("ociimage/%s", blob) + + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, 0, err + } + s.repo = repo + } + + layerSize, err := s.getLayerSize(blob) + if err != nil { + return nil, 0, err + } + + tarsplit, err := s.getTarSplitData(blob) + if err != nil { + return nil, 0, err + } + + // if tarsplit is nil we are looking at the manifest. 
Return directly the file in /content + if tarsplit == nil { + file, err := s.readSingleFile(branch, "/content") + if err != nil { + return nil, 0, err + } + return file, layerSize, nil + } + + mf := bytes.NewReader(tarsplit) + mfz, err := gzip.NewReader(mf) + if err != nil { + return nil, 0, err + } + defer mfz.Close() + metaUnpacker := storage.NewJSONUnpacker(mfz) + + getter, err := newOSTreePathFileGetter(s.repo, branch) + if err != nil { + return nil, 0, err + } + + ots := asm.NewOutputTarStream(getter, metaUnpacker) + + pipeReader, pipeWriter := io.Pipe() + go func() { + io.Copy(pipeWriter, ots) + pipeWriter.Close() + }() + + rc := ioutils.NewReadCloserWrapper(pipeReader, func() error { + getter.Close() + return ots.Close() + }) + return rc, layerSize, nil +} + +func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.New("manifest lists are not supported by this transport") + } + lenSignatures, err := s.getLenSignatures() + if err != nil { + return nil, err + } + branch := fmt.Sprintf("ociimage/%s", s.ref.branchName) + + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, err + } + s.repo = repo + } + + signatures := [][]byte{} + for i := int64(1); i <= lenSignatures; i++ { + sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i)) + if err != nil { + return nil, err + } + defer sigReader.Close() + + sig, err := ioutil.ReadAll(sigReader) + if err != nil { + return nil, err + } + signatures = append(signatures, sig) + } + return signatures, nil +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *ostreeImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/ostree/ostree_transport.go b/vendor/github.com/containers/image/ostree/ostree_transport.go index 0de74a71..cc85a43f 100644 --- a/vendor/github.com/containers/image/ostree/ostree_transport.go +++ b/vendor/github.com/containers/image/ostree/ostree_transport.go @@ -10,12 +10,12 @@ import ( "regexp" "strings" - "github.com/pkg/errors" - "github.com/containers/image/directory/explicitfilepath" "github.com/containers/image/docker/reference" + "github.com/containers/image/image" "github.com/containers/image/transports" "github.com/containers/image/types" + "github.com/pkg/errors" ) const defaultOSTreeRepo = "/ostree/repo" @@ -66,6 +66,11 @@ type ostreeReference struct { repo string } +type ostreeImageCloser struct { + types.ImageCloser + size int64 +} + func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) { var repo = "" var image = "" @@ -110,7 +115,7 @@ func NewReference(image string, repo string) (types.ImageReference, error) { // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces // from being ambiguous with values of PolicyConfigurationIdentity. if strings.Contains(resolved, ":") { - return nil, errors.Errorf("Invalid OSTreeCI reference %s@%s: path %s contains a colon", image, repo, resolved) + return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved) } return ostreeReference{ @@ -168,18 +173,38 @@ func (ref ostreeReference) PolicyConfigurationNamespaces() []string { return res } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. 
-// The caller must call .Close() on the returned Image. +func (s *ostreeImageCloser) Size() (int64, error) { + return s.size, nil +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - return nil, errors.New("Reading ostree: images is currently not supported") +func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { + var tmpDir string + if ctx == nil || ctx.OSTreeTmpDirPath == "" { + tmpDir = os.TempDir() + } else { + tmpDir = ctx.OSTreeTmpDirPath + } + src, err := newImageSource(ctx, tmpDir, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. func (ref ostreeReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - return nil, errors.New("Reading ostree: images is currently not supported") + var tmpDir string + if ctx == nil || ctx.OSTreeTmpDirPath == "" { + tmpDir = os.TempDir() + } else { + tmpDir = ctx.OSTreeTmpDirPath + } + return newImageSource(ctx, tmpDir, ref) } // NewImageDestination returns a types.ImageDestination for this reference. diff --git a/vendor/github.com/containers/image/pkg/sysregistries/system_registries.go b/vendor/github.com/containers/image/pkg/sysregistries/system_registries.go deleted file mode 100644 index e5564a2a..00000000 --- a/vendor/github.com/containers/image/pkg/sysregistries/system_registries.go +++ /dev/null @@ -1,86 +0,0 @@ -package sysregistries - -import ( - "github.com/BurntSushi/toml" - "github.com/containers/image/types" - "io/ioutil" - "path/filepath" -) - -// systemRegistriesConfPath is the path to the system-wide registry configuration file -// and is used to add/subtract potential registries for obtaining images. -// You can override this at build time with -// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path' -var systemRegistriesConfPath = builtinRegistriesConfPath - -// builtinRegistriesConfPath is the path to registry configuration file -// DO NOT change this, instead see systemRegistriesConfPath above. -const builtinRegistriesConfPath = "/etc/containers/registries.conf" - -type registries struct { - Registries []string `toml:"registries"` -} - -type tomlConfig struct { - Registries struct { - Search registries `toml:"search"` - Insecure registries `toml:"insecure"` - Block registries `toml:"block"` - } `toml:"registries"` -} - -// Reads the global registry file from the filesystem. 
Returns -// a byte array -func readRegistryConf(ctx *types.SystemContext) ([]byte, error) { - dirPath := systemRegistriesConfPath - if ctx != nil { - if ctx.SystemRegistriesConfPath != "" { - dirPath = ctx.SystemRegistriesConfPath - } else if ctx.RootForImplicitAbsolutePaths != "" { - dirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath) - } - } - configBytes, err := ioutil.ReadFile(dirPath) - return configBytes, err -} - -// For mocking in unittests -var readConf = readRegistryConf - -// Loads the registry configuration file from the filesystem and -// then unmarshals it. Returns the unmarshalled object. -func loadRegistryConf(ctx *types.SystemContext) (*tomlConfig, error) { - config := &tomlConfig{} - - configBytes, err := readConf(ctx) - if err != nil { - return nil, err - } - - err = toml.Unmarshal(configBytes, &config) - return config, err -} - -// GetRegistries returns an array of strings that contain the names -// of the registries as defined in the system-wide -// registries file. it returns an empty array if none are -// defined -func GetRegistries(ctx *types.SystemContext) ([]string, error) { - config, err := loadRegistryConf(ctx) - if err != nil { - return nil, err - } - return config.Registries.Search.Registries, nil -} - -// GetInsecureRegistries returns an array of strings that contain the names -// of the insecure registries as defined in the system-wide -// registries file. it returns an empty array if none are -// defined -func GetInsecureRegistries(ctx *types.SystemContext) ([]string, error) { - config, err := loadRegistryConf(ctx) - if err != nil { - return nil, err - } - return config.Registries.Insecure.Registries, nil -} diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go index bc6c5e9a..42cc12ab 100644 --- a/vendor/github.com/containers/image/signature/policy_config.go +++ b/vendor/github.com/containers/image/signature/policy_config.go @@ -70,7 +70,11 @@ func NewPolicyFromFile(fileName string) (*Policy, error) { if err != nil { return nil, err } - return NewPolicyFromBytes(contents) + policy, err := NewPolicyFromBytes(contents) + if err != nil { + return nil, errors.Wrapf(err, "invalid policy in %q", fileName) + } + return policy, nil } // NewPolicyFromBytes returns a policy parsed from the specified blob. diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index 08fa71b5..038195c1 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -1,14 +1,17 @@ +// +build !containers_image_storage_stub + package storage import ( "bytes" "context" "encoding/json" + "fmt" "io" "io/ioutil" - "time" - - "github.com/pkg/errors" + "os" + "path/filepath" + "sync/atomic" "github.com/containers/image/image" "github.com/containers/image/manifest" @@ -16,10 +19,14 @@ import ( "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" - ddigest "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. 
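The `temporaryDirectoryForBigFiles` constant above exists because layer blobs can be huge: on systemd hosts `/tmp` is commonly a tmpfs, so staging multi-gigabyte blobs there would consume RAM. A sketch of the pattern `newImageDestination` uses later in this file for its blob-staging directory:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// bigTempDir creates a scratch directory under /var/tmp rather than
// os.TempDir(), mirroring the temporaryDirectoryForBigFiles choice above.
func bigTempDir() (string, error) {
	return ioutil.TempDir("/var/tmp", "storage")
}

func main() {
	dir, err := bigTempDir()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer os.RemoveAll(dir) // the destination removes it in Close()
	fmt.Println("staging blobs under", dir)
}
```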
+ var ( // ErrBlobDigestMismatch is returned when PutBlob() is given a blob // with a digest-based name that doesn't match its contents. @@ -27,8 +34,8 @@ var ( // ErrBlobSizeMismatch is returned when PutBlob() is given a blob // with an expected size that doesn't match the reader. ErrBlobSizeMismatch = errors.New("blob size mismatch") - // ErrNoManifestLists is returned when GetTargetManifest() is - // called. + // ErrNoManifestLists is returned when GetManifest() is called. + // with a non-nil instanceDigest. ErrNoManifestLists = errors.New("manifest lists are not supported by this transport") // ErrNoSuchImage is returned when we attempt to access an image which // doesn't exist in the storage area. @@ -37,256 +44,318 @@ var ( type storageImageSource struct { imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice + ID string + layerPosition map[digest.Digest]int // Where we are in reading a blob's layers + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } type storageImageDestination struct { - imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary - Manifest []byte `json:"-"` // Manifest contents, temporary - Signatures []byte `json:"-"` // Signature contents, temporary - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice + image types.ImageCloser + systemContext *types.SystemContext + imageRef storageReference // The reference we'll use to name the image + publicRef storageReference // The reference we return when asked about the name we'll give to the image + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + signatures []byte // Signature contents, temporary + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } -type storageLayerMetadata struct { - Digest string `json:"digest,omitempty"` - Size int64 `json:"size"` - CompressedSize int64 `json:"compressed-size,omitempty"` -} - -type storageImage struct { - types.Image +type storageImageCloser struct { + types.ImageCloser size int64 } -// 
newImageSource sets us up to read out an image, which needs to already exist. +// newImageSource sets up an image for reading. func newImageSource(imageRef storageReference) (*storageImageSource, error) { + // First, locate the image. img, err := imageRef.resolveImage() if err != nil { return nil, err } + + // Build the reader object. image := &storageImageSource{ imageRef: imageRef, - Created: time.Now(), ID: img.ID, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - LayerPosition: make(map[ddigest.Digest]int), + layerPosition: make(map[digest.Digest]int), SignatureSizes: []int{}, } - if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { - return nil, errors.Wrap(err, "error decoding metadata for source image") - } - return image, nil -} - -// newImageDestination sets us up to write a new image. -func newImageDestination(imageRef storageReference) (*storageImageDestination, error) { - image := &storageImageDestination{ - imageRef: imageRef, - Tag: imageRef.reference, - Created: time.Now(), - ID: imageRef.id, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - BlobData: make(map[ddigest.Digest][]byte), - SignatureSizes: []int{}, + if img.Metadata != "" { + if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { + return nil, errors.Wrap(err, "error decoding metadata for source image") + } } return image, nil } +// Reference returns the image reference that we used to find this image. func (s storageImageSource) Reference() types.ImageReference { return s.imageRef } -func (s storageImageDestination) Reference() types.ImageReference { - return s.imageRef -} - +// Close cleans up any resources we tied up while reading the image. func (s storageImageSource) Close() error { return nil } -func (s storageImageDestination) Close() error { - return nil +// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given. +func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { + rc, n, _, err = s.getBlobAndLayerID(info) + return rc, n, err } +// getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given. +func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { + var layer storage.Layer + var diffOptions *storage.DiffOptions + // We need a valid digest value. + err = info.Digest.Validate() + if err != nil { + return nil, -1, "", err + } + // Check if the blob corresponds to a diff that was used to initialize any layers. Our + // callers should try to retrieve layers using their uncompressed digests, so no need to + // check if they're using one of the compressed digests, which we can't reproduce anyway. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) + // If it's not a layer, then it must be a data item. + if len(layers) == 0 { + b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) + if err != nil { + return nil, -1, "", err + } + r := bytes.NewReader(b) + logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) + return ioutil.NopCloser(r), int64(r.Len()), "", nil + } + // Step through the list of matching layers. Tests may want to verify that if we have multiple layers + // which claim to have the same contents, that we actually do have multiple layers, otherwise we could + // just go ahead and use the first one every time. 
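The lines that follow implement exactly that rotation: the `layerPosition` map remembers how far reads of a given digest have advanced, so repeated reads of the same digest cycle through all layers claiming those contents. A distilled version, with digests as plain strings:

```go
package main

import "fmt"

// layerPicker cycles through layers that share an uncompressed digest, the
// same round-robin the source implements with its layerPosition map.
type layerPicker struct {
	position map[string]int // digest -> next index to hand out
}

func (p *layerPicker) pick(dig string, layers []string) string {
	i := p.position[dig]
	p.position[dig] = i + 1
	return layers[i%len(layers)]
}

func main() {
	p := &layerPicker{position: map[string]int{}}
	layers := []string{"layer-a", "layer-b"} // two layers claiming identical contents
	for n := 0; n < 3; n++ {
		fmt.Println(p.pick("sha256:0a1b2c", layers))
	}
	// Prints: layer-a, layer-b, layer-a
}
```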
+ i := s.layerPosition[info.Digest] + s.layerPosition[info.Digest] = i + 1 + if len(layers) > 0 { + layer = layers[i%len(layers)] + } + // Force the storage layer to not try to match any compression that was used when the layer was first + // handed to it. + noCompression := archive.Uncompressed + diffOptions = &storage.DiffOptions{ + Compression: &noCompression, + } + if layer.UncompressedSize < 0 { + n = -1 + } else { + n = layer.UncompressedSize + } + logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) + rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) + if err != nil { + return nil, -1, "", err + } + return rc, n, layer.ID, err +} + +// GetManifest() reads the image's manifest. +func (s *storageImageSource) GetManifest(instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { + if instanceDigest != nil { + return nil, "", ErrNoManifestLists + } + if len(s.cachedManifest) == 0 { + // We stored the manifest as an item named after storage.ImageDigestBigDataKey. + cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, storage.ImageDigestBigDataKey) + if err != nil { + return nil, "", err + } + s.cachedManifest = cachedBlob + } + return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err +} + +// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of +// the image, after they've been decompressed. +func (s *storageImageSource) LayerInfosForCopy() []types.BlobInfo { + simg, err := s.imageRef.transport.store.Image(s.ID) + if err != nil { + logrus.Errorf("error reading image %q: %v", s.ID, err) + return nil + } + updatedBlobInfos := []types.BlobInfo{} + layerID := simg.TopLayer + _, manifestType, err := s.GetManifest(nil) + if err != nil { + logrus.Errorf("error reading image manifest for %q: %v", s.ID, err) + return nil + } + uncompressedLayerType := "" + switch manifestType { + case imgspecv1.MediaTypeImageManifest: + uncompressedLayerType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + // This is actually a compressed type, but there's no uncompressed type defined + uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType + } + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + logrus.Errorf("error reading layer %q in image %q: %v", layerID, s.ID, err) + return nil + } + if layer.UncompressedDigest == "" { + logrus.Errorf("uncompressed digest for layer %q is unknown", layerID) + return nil + } + if layer.UncompressedSize < 0 { + logrus.Errorf("uncompressed size for layer %q is unknown", layerID) + return nil + } + blobInfo := types.BlobInfo{ + Digest: layer.UncompressedDigest, + Size: layer.UncompressedSize, + MediaType: uncompressedLayerType, + } + updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...) + layerID = layer.Parent + } + return updatedBlobInfos +} + +// GetSignatures() parses the image's signatures blob into a slice of byte slices. 
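`GetSignatures` below stores all signatures as one concatenated big-data item and slices it back apart using the recorded `SignatureSizes`. A runnable distillation of that slicing (the sample blob and sizes are made up):

```go
package main

import (
	"errors"
	"fmt"
)

// splitSignatures slices one concatenated signature blob back into individual
// signatures using the recorded lengths, as GetSignatures does with
// SignatureSizes over the "signatures" big-data item.
func splitSignatures(blob []byte, sizes []int) ([][]byte, error) {
	sigs := [][]byte{}
	offset := 0
	for _, length := range sizes {
		if offset+length > len(blob) {
			return nil, errors.New("signature sizes exceed blob length")
		}
		sigs = append(sigs, blob[offset:offset+length])
		offset += length
	}
	if offset != len(blob) {
		return nil, fmt.Errorf("signatures data contained %d extra bytes", len(blob)-offset)
	}
	return sigs, nil
}

func main() {
	blob := []byte("aaabbbbcc") // three signatures of lengths 3, 4, 2
	sigs, err := splitSignatures(blob, []int{3, 4, 2})
	if err != nil {
		panic(err)
	}
	for i, s := range sigs {
		fmt.Printf("signature %d: %q\n", i, s)
	}
}
```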
+func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
+	if instanceDigest != nil {
+		return nil, ErrNoManifestLists
+	}
+	var offset int
+	sigslice := [][]byte{}
+	signature := []byte{}
+	if len(s.SignatureSizes) > 0 {
+		signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures")
+		if err != nil {
+			return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.ID)
+		}
+		signature = signatureBlob
+	}
+	for _, length := range s.SignatureSizes {
+		sigslice = append(sigslice, signature[offset:offset+length])
+		offset += length
+	}
+	if offset != len(signature) {
+		return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
+	}
+	return sigslice, nil
+}
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image
+func newImageDestination(ctx *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
+	directory, err := ioutil.TempDir(temporaryDirectoryForBigFiles, "storage")
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating a temporary directory")
+	}
+	// Break reading of the reference we're writing, so that copy.Image() won't try to rewrite
+	// schema1 image manifests to remove embedded references, since that changes the manifest's
+	// digest, and that makes the image unusable if we subsequently try to access it using a
+	// reference that mentions the no-longer-correct digest.
+	publicRef := imageRef
+	publicRef.name = nil
+	image := &storageImageDestination{
+		systemContext:  ctx,
+		imageRef:       imageRef,
+		publicRef:      publicRef,
+		directory:      directory,
+		blobDiffIDs:    make(map[digest.Digest]digest.Digest),
+		fileSizes:      make(map[digest.Digest]int64),
+		filenames:      make(map[digest.Digest]string),
+		SignatureSizes: []int{},
+	}
+	return image, nil
+}
+
+// Reference returns a mostly-usable image reference that can't return a DockerReference, to
+// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to
+// remove image names that they contain which don't match the value we're using.
+func (s storageImageDestination) Reference() types.ImageReference {
+	return s.publicRef
+}
+
+// Close cleans up the temporary directory.
+func (s *storageImageDestination) Close() error {
+	return os.RemoveAll(s.directory)
+}
+
+// ShouldCompressLayers indicates whether or not a caller should compress not-already-compressed
+// data when handing it to us.
 func (s storageImageDestination) ShouldCompressLayers() bool {
-	// We ultimately have to decompress layers to populate trees on disk,
-	// so callers shouldn't bother compressing them before handing them to
-	// us, if they're not already compressed.
+	// We ultimately have to decompress layers to populate trees on disk, so callers shouldn't
+	// bother compressing them before handing them to us, if they're not already compressed.
 	return false
 }
 
-// putBlob stores a layer or data blob, optionally enforcing that a digest in
-// blobinfo matches the incoming data.
-func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) {
-	blobSize := blobinfo.Size
-	digest := blobinfo.Digest
+// PutBlob stores a layer or data blob in our temporary directory, checking that any information
+// in the blobinfo matches the incoming data.
+func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { errorBlobInfo := types.BlobInfo{ Digest: "", Size: -1, } - // Try to read an initial snippet of the blob. - buf := [archive.HeaderSize]byte{} - n, err := io.ReadAtLeast(stream, buf[:], len(buf)) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return errorBlobInfo, err - } - // Set up to read the whole blob (the initial snippet, plus the rest) - // while digesting it with either the default, or the passed-in digest, - // if one was specified. - hasher := ddigest.Canonical.Digester() - if digest.Validate() == nil { - if a := digest.Algorithm(); a.Available() { + // Set up to digest the blob and count its size while saving it to a file. + hasher := digest.Canonical.Digester() + if blobinfo.Digest.Validate() == nil { + if a := blobinfo.Digest.Algorithm(); a.Available() { hasher = a.Digester() } } - hash := "" + diffID := digest.Canonical.Digester() + filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename) + } + defer file.Close() counter := ioutils.NewWriteCounter(hasher.Hash()) - defragmented := io.MultiReader(bytes.NewBuffer(buf[:n]), stream) - multi := io.TeeReader(defragmented, counter) - if (n > 0) && archive.IsArchive(buf[:n]) { - // It's a filesystem layer. If it's not the first one in the - // image, we assume that the most recently added layer is its - // parent. - parentLayer := "" - for _, blob := range s.BlobList { - if layerList, ok := s.Layers[blob.Digest]; ok { - parentLayer = layerList[len(layerList)-1] - } - } - // If we have an expected content digest, generate a layer ID - // based on the parent's ID and the expected content digest. - id := "" - if digest.Validate() == nil { - id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex() - } - // Attempt to create the identified layer and import its contents. - layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi) - if err != nil && errors.Cause(err) != storage.ErrDuplicateID { - logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err) - return errorBlobInfo, err - } - if errors.Cause(err) == storage.ErrDuplicateID { - // We specified an ID, and there's already a layer with - // the same ID. Drain the input so that we can look at - // its length and digest. - _, err := io.Copy(ioutil.Discard, multi) - if err != nil && err != io.EOF { - logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, id, err) - return errorBlobInfo, err - } - hash = hasher.Digest().String() - } else { - // Applied the layer with the specified ID. Note the - // size info and computed digest. - hash = hasher.Digest().String() - layerMeta := storageLayerMetadata{ - Digest: hash, - CompressedSize: counter.Count, - Size: uncompressedSize, - } - if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil { - s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata)) - } - // Hang on to the new layer's ID. - id = layer.ID - } - // Check if the size looks right. 
- if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count { - logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size) - if layer != nil { - // Something's wrong; delete the newly-created layer. - s.imageRef.transport.store.DeleteLayer(layer.ID) - } - return errorBlobInfo, ErrBlobSizeMismatch - } - // If the content digest was specified, verify it. - if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { - logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash) - if layer != nil { - // Something's wrong; delete the newly-created layer. - s.imageRef.transport.store.DeleteLayer(layer.ID) - } - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = counter.Count - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Record that this layer blob is a layer, and the layer ID it - // ended up having. This is a list, in case the same blob is - // being applied more than once. - s.Layers[digest] = append(s.Layers[digest], id) - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count}) - if layer != nil { - logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id) - } else { - logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id) - } - } else { - // It's just data. Finish scanning it in, check that our - // computed digest matches the passed-in digest, and store it, - // but leave it out of the blob-to-layer-ID map so that we can - // tell that it's not a layer. - blob, err := ioutil.ReadAll(multi) - if err != nil && err != io.EOF { - return errorBlobInfo, err - } - hash = hasher.Digest().String() - if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size { - logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size) - return errorBlobInfo, ErrBlobSizeMismatch - } - // If we were given a digest, verify that the content matches - // it. - if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { - logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash) - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = int64(len(blob)) - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Save the blob for when we Commit(). - s.BlobData[digest] = blob - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))}) - logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest) + reader := io.TeeReader(io.TeeReader(stream, counter), file) + decompressed, err := archive.DecompressStream(reader) + if err != nil { + return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob") + } + // Copy the data to the file. + _, err = io.Copy(diffID.Hash(), decompressed) + decompressed.Close() + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename) + } + // Ensure that any information that we were given about the blob is correct. 
+ if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() { + return errorBlobInfo, ErrBlobDigestMismatch + } + if blobinfo.Size >= 0 && blobinfo.Size != counter.Count { + return errorBlobInfo, ErrBlobSizeMismatch + } + // Record information about the blob. + s.blobDiffIDs[hasher.Digest()] = diffID.Digest() + s.fileSizes[hasher.Digest()] = counter.Count + s.filenames[hasher.Digest()] = filename + blobDigest := blobinfo.Digest + if blobDigest.Validate() != nil { + blobDigest = hasher.Digest() + } + blobSize := blobinfo.Size + if blobSize < 0 { + blobSize = counter.Count } return types.BlobInfo{ - Digest: digest, - Size: blobSize, + Digest: blobDigest, + Size: blobSize, + MediaType: blobinfo.MediaType, }, nil } -// PutBlob is used to both store filesystem layers and binary data that is part -// of the image. Filesystem layers are assumed to be imported in order, as -// that is required by some of the underlying storage drivers. -func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { - return s.putBlob(stream, blobinfo, true) -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. +// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be +// reapplied using ReapplyBlob. +// // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); // it returns a non-nil error only on an unexpected failure. @@ -294,93 +363,289 @@ func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, if blobinfo.Digest == "" { return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) } - for _, blob := range s.BlobList { - if blob.Digest == blobinfo.Digest { - return true, blob.Size, nil - } + if err := blobinfo.Digest.Validate(); err != nil { + return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`) } + // Check if we've already cached it in a file. + if size, ok := s.fileSizes[blobinfo.Digest]; ok { + return true, size, nil + } + // Check if we have a wasn't-compressed layer in storage that's based on that blob. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Save this for completeness. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, layers[0].UncompressedSize, nil + } + // Check if we have a was-compressed layer in storage that's based on that blob. + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, layers[0].CompressedSize, nil + } + // Nope, we don't have it. 
return false, -1, nil } +// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the +// same one when it walks the list in the manifest. func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) { - err := blobinfo.Digest.Validate() - if err != nil { - return types.BlobInfo{}, err + present, size, err := s.HasBlob(blobinfo) + if !present { + return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo) } - if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, blobinfo.Digest.String()) - if err != nil { - return types.BlobInfo{}, err + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo) + } + blobinfo.Size = size + return blobinfo, nil +} + +// computeID computes a recommended image ID based on information we have so far. If +// the manifest is not of a type that we recognize, we return an empty value, indicating +// that since we don't have a recommendation, a random ID should be used if one needs +// to be allocated. +func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // Build the diffID list. We need the decompressed sums that we've been calculating to + // fill in the DiffIDs. It's expected (but not enforced by us) that the number of + // diffIDs corresponds to the number of non-EmptyLayer entries in the history. + var diffIDs []digest.Digest + switch m.(type) { + case *manifest.Schema1: + // Build a list of the diffIDs we've generated for the non-throwaway FS layers, + // in reverse of the order in which they were originally listed. + s1, ok := m.(*manifest.Schema1) + if !ok { + // Shouldn't happen + logrus.Debugf("internal error reading schema 1 manifest") + return "" } - return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil + for i, history := range s1.History { + compat := manifest.Schema1V1Compatibility{} + if err := json.Unmarshal([]byte(history.V1Compatibility), &compat); err != nil { + logrus.Debugf("internal error reading schema 1 history: %v", err) + return "" + } + if compat.ThrowAway { + continue + } + blobSum := s1.FSLayers[i].BlobSum + diffID, ok := s.blobDiffIDs[blobSum] + if !ok { + logrus.Infof("error looking up diffID for layer %q", blobSum.String()) + return "" + } + diffIDs = append([]digest.Digest{diffID}, diffIDs...) + } + case *manifest.Schema2, *manifest.OCI1: + // We know the ID calculation for these formats doesn't actually use the diffIDs, + // so we don't need to populate the diffID list. } - layerList := s.Layers[blobinfo.Digest] - rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1]) + id, err := m.ImageID(diffIDs) if err != nil { - return types.BlobInfo{}, err + return "" } - return s.putBlob(rc, blobinfo, false) + return id +} + +// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig +// information out of it for Inspect(). +func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { + if info.Digest == "" { + return nil, errors.Errorf(`no digest supplied when reading blob`) + } + if err := info.Digest.Validate(); err != nil { + return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) + } + // Assume it's a file, since we're only calling this from a place that expects to read files. 
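+	// PutBlob spools every incoming blob to a file recorded in s.filenames,
+	// so a hit in that map lets us read the configuration back from disk.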
+ if filename, ok := s.filenames[info.Digest]; ok { + contents, err2 := ioutil.ReadFile(filename) + if err2 != nil { + return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename) + } + return contents, nil + } + // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. + return nil, errors.New("blob not found") } func (s *storageImageDestination) Commit() error { - // Create the image record. - lastLayer := "" - for _, blob := range s.BlobList { - if layerList, ok := s.Layers[blob.Digest]; ok { - lastLayer = layerList[len(layerList)-1] - } + // Find the list of layer blobs. + if len(s.manifest) == 0 { + return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") } - img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil) + man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) + if err != nil { + return errors.Wrapf(err, "error parsing manifest") + } + layerBlobs := man.LayerInfos() + // Extract or find the layers. + lastLayer := "" + addedLayers := []string{} + for _, blob := range layerBlobs { + var diff io.ReadCloser + // Check if there's already a layer with the ID that we'd give to the result of applying + // this layer blob to its parent, if it has one, or the blob's hex value otherwise. + diffID, haveDiffID := s.blobDiffIDs[blob.Digest] + if !haveDiffID { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), + // or to even check if we had it. + logrus.Debugf("looking for diffID for blob %+v", blob.Digest) + has, _, err := s.HasBlob(blob) + if err != nil { + return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) + } + if !has { + return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) + } + diffID, haveDiffID = s.blobDiffIDs[blob.Digest] + if !haveDiffID { + return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) + } + } + id := diffID.Hex() + if lastLayer != "" { + id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + } + if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { + // There's already a layer that should have the right contents, just reuse it. + lastLayer = layer.ID + continue + } + // Check if we cached a file with that blobsum. If we didn't already have a layer with + // the blob's contents, we should have gotten a copy. + if filename, ok := s.filenames[blob.Digest]; ok { + // Use the file's contents to initialize the layer. + file, err2 := os.Open(filename) + if err2 != nil { + return errors.Wrapf(err2, "error opening file %q", filename) + } + defer file.Close() + diff = file + } + if diff == nil { + // Try to find a layer with contents matching that blobsum. + layer := "" + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } else { + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } + } + if layer == "" { + return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest) + } + // Use the layer's contents to initialize the new layer. 
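+			// Ask for the diff uncompressed so the new layer is built from the
+			// stored layer's raw contents rather than from a recompressed (and
+			// differently-digested) stream.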
+ noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions) + if err2 != nil { + return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest) + } + defer diff.Close() + } + if diff == nil { + // This shouldn't have happened. + return errors.Errorf("error applying blob %q: content not found", blob.Digest) + } + // Build the new layer using the diff, regardless of where it came from. + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, diff) + if err != nil { + return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) + } + lastLayer = layer.ID + addedLayers = append([]string{lastLayer}, addedLayers...) + } + // If one of those blobs was a configuration blob, then we can try to dig out the date when the image + // was originally created, in case we're just copying it. If not, no harm done. + options := &storage.ImageOptions{} + if inspect, err := man.Inspect(s.getConfigBlob); err == nil { + logrus.Debugf("setting image creation date to %s", inspect.Created) + options.CreationDate = inspect.Created + } + if manifestDigest, err := manifest.Digest(s.manifest); err == nil { + options.Digest = manifestDigest + } + // Create the image record, pointing to the most-recently added layer. + intendedID := s.imageRef.id + if intendedID == "" { + intendedID = s.computeID(man) + } + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) if err != nil { if errors.Cause(err) != storage.ErrDuplicateID { logrus.Debugf("error creating image: %q", err) - return errors.Wrapf(err, "error creating image %q", s.ID) + return errors.Wrapf(err, "error creating image %q", intendedID) } - img, err = s.imageRef.transport.store.Image(s.ID) + img, err = s.imageRef.transport.store.Image(intendedID) if err != nil { - return errors.Wrapf(err, "error reading image %q", s.ID) + return errors.Wrapf(err, "error reading image %q", intendedID) } if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", s.ID) - return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", s.ID) + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) } logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) } else { logrus.Debugf("created new image ID %q", img.ID) } - s.ID = img.ID - names := img.Names - if s.Tag != "" { - names = append(names, s.Tag) + // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. + dataBlobs := make(map[digest.Digest]struct{}) + for blob := range s.filenames { + dataBlobs[blob] = struct{}{} } - // We have names to set, so move those names to this image. 
- if len(names) > 0 { + for _, layerBlob := range layerBlobs { + delete(dataBlobs, layerBlob.Digest) + } + for blob := range dataBlobs { + v, err := ioutil.ReadFile(s.filenames[blob]) + if err != nil { + return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) + return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) + } + } + // Set the reference's name on the image. + if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { + names := []string{} + if name != nil { + names = append(names, verboseName(name)) + } + if len(oldNames) > 0 { + names = append(names, oldNames...) + } if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } - logrus.Debugf("error setting names on image %q: %v", img.ID, err) - return err + logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) + return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) } logrus.Debugf("set names of image %q to %v", img.ID, names) } - // Save the data blobs to disk, and drop their contents from memory. - keys := []ddigest.Digest{} - for k, v := range s.BlobData { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err) - return err - } - keys = append(keys, k) - } - for _, key := range keys { - delete(s.BlobData, key) - } - // Save the manifest, if we have one. - if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil { + // Save the manifest. Use storage.ImageDigestBigDataKey as the item's + // name, so that its digest can be used to locate the image in the Store. + if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -388,12 +653,14 @@ func (s *storageImageDestination) Commit() error { return err } // Save the signatures, if we have any. - if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + if len(s.signatures) > 0 { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return err } - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return err } // Save our metadata. 
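+	// The destination object itself doubles as the metadata here: marshalling
+	// s preserves its exported fields (e.g. SignatureSizes) alongside the
+	// image record.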
metadata, err := json.Marshal(s) @@ -405,7 +672,7 @@ func (s *storageImageDestination) Commit() error { return err } if len(metadata) != 0 { - if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil { + if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -418,7 +685,7 @@ func (s *storageImageDestination) Commit() error { } var manifestMIMETypes = []string{ - // TODO(runcom): we'll add OCI as part of another PR here + imgspecv1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, @@ -428,23 +695,20 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string { return manifestMIMETypes } -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +// PutManifest writes the manifest to the destination. func (s *storageImageDestination) PutManifest(manifest []byte) error { - s.Manifest = make([]byte, len(manifest)) - copy(s.Manifest, manifest) + s.manifest = make([]byte, len(manifest)) + copy(s.manifest, manifest) return nil } -// SupportsSignatures returns an error if we can't expect GetSignatures() to -// return data that was previously supplied to PutSignatures(). +// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was +// previously supplied to PutSignatures(). func (s *storageImageDestination) SupportsSignatures() error { return nil } -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be // uploaded to the image destination, true otherwise. func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { return false @@ -455,6 +719,7 @@ func (s *storageImageDestination) MustMatchRuntimeOS() bool { return true } +// PutSignatures records the image's signatures for committing as a single data blob. 
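+// The individual signature lengths are kept in SignatureSizes so that the
+// concatenated blob can be split back into separate signatures when read.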
func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { sizes := []int{} sigblob := []byte{} @@ -465,146 +730,73 @@ func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { copy(newblob[len(sigblob):], sig) sigblob = newblob } - s.Signatures = sigblob + s.signatures = sigblob s.SignatureSizes = sizes return nil } -func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { - rc, n, _, err = s.getBlobAndLayerID(info) - return rc, n, err -} - -func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { - err = info.Digest.Validate() - if err != nil { - return nil, -1, "", err - } - if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) - if err != nil { - return nil, -1, "", err - } - r := bytes.NewReader(b) - logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return ioutil.NopCloser(r), int64(r.Len()), "", nil - } - // If the blob was "put" more than once, we have multiple layer IDs - // which should all produce the same diff. For the sake of tests that - // want to make sure we created different layers each time the blob was - // "put", though, cycle through the layers. - layerList := s.Layers[info.Digest] - position, ok := s.LayerPosition[info.Digest] - if !ok { - position = 0 - } - s.LayerPosition[info.Digest] = (position + 1) % len(layerList) - logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest) - rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position]) - return rc, n, layerList[position], err -} - -func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) { - layer, err := store.Layer(layerID) - if err != nil { - return nil, -1, err - } - layerMeta := storageLayerMetadata{ - CompressedSize: -1, - } - if layer.Metadata != "" { - if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { - return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID) - } - } - if layerMeta.CompressedSize <= 0 { - n = -1 - } else { - n = layerMeta.CompressedSize - } - diff, err := store.Diff("", layer.ID, nil) - if err != nil { - return nil, -1, err - } - return diff, n, nil -} - -func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) { - manifestBlob, err = s.imageRef.transport.store.ImageBigData(s.ID, "manifest") - return manifestBlob, manifest.GuessMIMEType(manifestBlob), err -} - -func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) { - return nil, "", ErrNoManifestLists -} - -func (s *storageImageSource) GetSignatures(ctx context.Context) (signatures [][]byte, err error) { - var offset int - signature, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures") - if err != nil { - return nil, err - } - sigslice := [][]byte{} - for _, length := range s.SignatureSizes { - sigslice = append(sigslice, signature[offset:offset+length]) - offset += length - } - if offset != len(signature) { - return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset) - } - return sigslice, nil -} - +// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. 
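+// Layer sizes are gathered by walking the chain from the image's top layer
+// down through each layer's Parent link.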
func (s *storageImageSource) getSize() (int64, error) { var sum int64 - names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id) + // Size up the data blobs. + dataNames, err := s.imageRef.transport.store.ListImageBigData(s.ID) if err != nil { - return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id) + return -1, errors.Wrapf(err, "error reading image %q", s.ID) } - for _, name := range names { - bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.imageRef.id, name) + for _, dataName := range dataNames { + bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.ID, dataName) if err != nil { - return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id) + return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.ID) } sum += bigSize } + // Add the signature sizes. for _, sigSize := range s.SignatureSizes { sum += int64(sigSize) } - for _, layerList := range s.Layers { - for _, layerID := range layerList { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return -1, err - } - layerMeta := storageLayerMetadata{ - Size: -1, - } - if layer.Metadata != "" { - if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { - return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID) - } - } - if layerMeta.Size < 0 { - return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) - } - sum += layerMeta.Size + // Prepare to walk the layer list. + img, err := s.imageRef.transport.store.Image(s.ID) + if err != nil { + return -1, errors.Wrapf(err, "error reading image info %q", s.ID) + } + // Walk the layer list. + layerID := img.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return -1, err } + if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) + } + sum += layer.UncompressedSize + if layer.Parent == "" { + break + } + layerID = layer.Parent } return sum, nil } -func (s *storageImage) Size() (int64, error) { +// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) Size() (int64, error) { + return s.getSize() +} + +// Size() returns the previously-computed size of the image, with no error. 
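+// The value is computed once, in newImage below, and cached on the struct.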
+func (s *storageImageCloser) Size() (int64, error) { return s.size, nil } // newImage creates an image that also knows its size -func newImage(s storageReference) (types.Image, error) { +func newImage(ctx *types.SystemContext, s storageReference) (types.ImageCloser, error) { src, err := newImageSource(s) if err != nil { return nil, err } - img, err := image.FromSource(src) + img, err := image.FromSource(ctx, src) if err != nil { return nil, err } @@ -612,5 +804,5 @@ func newImage(s storageReference) (types.Image, error) { if err != nil { return nil, err } - return &storageImage{Image: img, size: size}, nil + return &storageImageCloser{ImageCloser: img, size: size}, nil } diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go index ded58705..96887142 100644 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ b/vendor/github.com/containers/image/storage/storage_reference.go @@ -1,3 +1,5 @@ +// +build !containers_image_storage_stub + package storage import ( @@ -6,6 +8,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/types" "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -18,9 +21,11 @@ type storageReference struct { reference string id string name reference.Named + tag string + digest digest.Digest } -func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference { +func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference { // We take a copy of the transport, which contains a pointer to the // store that it used for resolving this reference, so that the // transport that we'll return from Transport() won't be affected by @@ -30,6 +35,8 @@ func newReference(transport storageTransport, reference, id string, name referen reference: reference, id: id, name: name, + tag: tag, + digest: digest, } } @@ -37,11 +44,32 @@ func newReference(transport storageTransport, reference, id string, name referen // one present with the same name or ID, and return the image. func (s *storageReference) resolveImage() (*storage.Image, error) { if s.id == "" { + // Look for an image that has the expanded reference name as an explicit Name value. image, err := s.transport.store.Image(s.reference) if image != nil && err == nil { s.id = image.ID } } + if s.id == "" && s.name != nil && s.digest != "" { + // Look for an image with the specified digest that has the same name, + // though possibly with a different tag or digest, as a Name value, so + // that the canonical reference can be implicitly resolved to the image. 
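+		// (For example, a canonical reference like localhost/foo@sha256:...
+		// can resolve to an image recorded under localhost/foo:latest when the
+		// digests match.)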
+ images, err := s.transport.store.ImagesByDigest(s.digest) + if images != nil && err == nil { + repo := reference.FamiliarName(reference.TrimNamed(s.name)) + search: + for _, image := range images { + for _, name := range image.Names { + if named, err := reference.ParseNormalizedNamed(name); err == nil { + if reference.FamiliarName(reference.TrimNamed(named)) == repo { + s.id = image.ID + break search + } + } + } + } + } + } if s.id == "" { logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport()) return nil, ErrNoSuchImage @@ -50,12 +78,15 @@ func (s *storageReference) resolveImage() (*storage.Image, error) { if err != nil { return nil, errors.Wrapf(err, "error reading image %q", s.id) } - if s.reference != "" { + if s.name != nil { + repo := reference.FamiliarName(reference.TrimNamed(s.name)) nameMatch := false for _, name := range img.Names { - if name == s.reference { - nameMatch = true - break + if named, err := reference.ParseNormalizedNamed(name); err == nil { + if reference.FamiliarName(reference.TrimNamed(named)) == repo { + nameMatch = true + break + } } } if !nameMatch { @@ -76,8 +107,21 @@ func (s storageReference) Transport() types.ImageTransport { } } -// Return a name with a tag, if we have a name to base them on. +// Return a name with a tag or digest, if we have either, else return it bare. func (s storageReference) DockerReference() reference.Named { + if s.name == nil { + return nil + } + if s.tag != "" { + if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil { + return namedTagged + } + } + if s.digest != "" { + if canonical, err := reference.WithDigest(s.name, s.digest); err == nil { + return canonical + } + } return s.name } @@ -91,7 +135,7 @@ func (s storageReference) StringWithinTransport() string { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.name == nil { + if s.reference == "" { return storeSpec + "@" + s.id } if s.id == "" { @@ -120,11 +164,8 @@ func (s storageReference) PolicyConfigurationNamespaces() []string { driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" namespaces := []string{} if s.name != nil { - if s.id != "" { - // The reference without the ID is also a valid namespace. - namespaces = append(namespaces, storeSpec+s.reference) - } - components := strings.Split(s.name.Name(), "/") + name := reference.TrimNamed(s.name) + components := strings.Split(name.String(), "/") for len(components) > 0 { namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) components = components[:len(components)-1] @@ -135,8 +176,13 @@ func (s storageReference) PolicyConfigurationNamespaces() []string { return namespaces } -func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - return newImage(s) +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
+func (s storageReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
+	return newImage(ctx, s)
 }
 
 func (s storageReference) DeleteImage(ctx *types.SystemContext) error {
@@ -159,5 +205,5 @@ func (s storageReference) NewImageSource(ctx *types.SystemContext) (types.ImageS
 }
 
 func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
-	return newImageDestination(s)
+	return newImageDestination(ctx, s)
 }
diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go
index 1a0ebd04..f6ebcdc4 100644
--- a/vendor/github.com/containers/image/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/storage/storage_transport.go
@@ -1,3 +1,5 @@
+// +build !containers_image_storage_stub
+
 package storage
 
 import (
@@ -11,11 +13,14 @@ import (
 	"github.com/containers/image/types"
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/idtools"
-	"github.com/opencontainers/go-digest"
-	ddigest "github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/sirupsen/logrus"
 )
 
+const (
+	minimumTruncatedIDLength = 3
+)
+
 func init() {
 	transports.Register(Transport)
 }
@@ -101,69 +106,133 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
 // relative to the given store, and returns it in a reference object.
 func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
 	var name reference.Named
-	var sum digest.Digest
-	var err error
 	if ref == "" {
-		return nil, ErrInvalidReference
+		return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref)
 	}
 	if ref[0] == '[' {
 		// Ignore the store specifier.
 		closeIndex := strings.IndexRune(ref, ']')
 		if closeIndex < 1 {
-			return nil, ErrInvalidReference
+			return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref)
 		}
 		ref = ref[closeIndex+1:]
 	}
-	refInfo := strings.SplitN(ref, "@", 2)
-	if len(refInfo) == 1 {
-		// A name.
-		name, err = reference.ParseNormalizedNamed(refInfo[0])
-		if err != nil {
-			return nil, err
+
+	// The last segment, if there's more than one, is either a digest from a reference, or an image ID.
+	split := strings.LastIndex(ref, "@")
+	idOrDigest := ""
+	if split != -1 {
+		// Peel off that last bit so that we can work on the rest.
+		idOrDigest = ref[split+1:]
+		if idOrDigest == "" {
+			return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
 		}
-	} else if len(refInfo) == 2 {
-		// An ID, possibly preceded by a name.
-		if refInfo[0] != "" {
-			name, err = reference.ParseNormalizedNamed(refInfo[0])
-			if err != nil {
-				return nil, err
-			}
-		}
-		sum, err = digest.Parse(refInfo[1])
-		if err != nil || sum.Validate() != nil {
-			sum, err = digest.Parse("sha256:" + refInfo[1])
-			if err != nil || sum.Validate() != nil {
-				return nil, err
-			}
-		}
-	} else { // Coverage: len(refInfo) is always 1 or 2
-		// Anything else: store specified in a form we don't
-		// recognize.
-		return nil, ErrInvalidReference
+		ref = ref[:split]
 	}
+
+	// The middle segment (now the last segment), if there is one, is a digest.
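+	// For illustration: in "example.com/foo@sha256:<digest>@<id>", the pass
+	// above peeled off "<id>", and this pass peels off "sha256:<digest>".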
+ split = strings.LastIndex(ref, "@") + sum := digest.Digest("") + if split != -1 { + sum = digest.Digest(ref[split+1:]) + if sum == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) + } + ref = ref[:split] + } + + // If we have something that unambiguously should be a digest, validate it, and then the third part, + // if we have one, as an ID. + id := "" + if sum != "" { + if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest) + } + if err := sum.Validate(); err != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) + } + id = idOrDigest + if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) { + // The ID is a truncated version of the ID of an image that's present in local storage, + // so we might as well use the expanded value. + id = img.ID + } + } else if idOrDigest != "" { + // There was no middle portion, so the final portion could be either a digest or an ID. + if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil { + // It's an ID. + id = idOrDigest + } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil { + // It's a digest. + sum = idSum + } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) { + // It's a truncated version of the ID of an image that's present in local storage, + // and we may need the expanded value. + id = img.ID + } else { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) + } + } + + // If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's + // at least of what we guess is a reasonable minimum length, because we don't want a really short value + // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. + if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" { + if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) { + // It's a truncated version of the ID of an image that's present in local storage; + // we need to expand it. + id = img.ID + ref = "" + } + } + + // The initial portion is probably a name, possibly with a tag. + if ref != "" { + var err error + if name, err = reference.ParseNormalizedNamed(ref); err != nil { + return nil, errors.Wrapf(err, "error parsing named reference %q", ref) + } + } + if name == nil && sum == "" && id == "" { + return nil, errors.Errorf("error parsing reference") + } + + // Construct a copy of the store spec. optionsList := "" options := store.GraphOptions() if len(options) > 0 { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]" - id := "" - if sum.Validate() == nil { - id = sum.Hex() - } + + // Convert the name back into a reference string, if we got a name. 
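+	// verboseName yields "name@digest" when we parsed a digest, and otherwise
+	// "name:tag", with reference.TagNameOnly supplying ":latest" if no tag was
+	// given.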
refname := "" + tag := "" if name != nil { - name = reference.TagNameOnly(name) - refname = verboseName(name) + if sum.Validate() == nil { + canonical, err := reference.WithDigest(name, sum) + if err != nil { + return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum) + } + refname = verboseName(canonical) + } else { + name = reference.TagNameOnly(name) + tagged, ok := name.(reference.Tagged) + if !ok { + return nil, errors.Errorf("error parsing possibly-tagless name %q", ref) + } + refname = verboseName(name) + tag = tagged.Tag() + } } if refname == "" { - logrus.Debugf("parsed reference into %q", storeSpec+"@"+id) + logrus.Debugf("parsed reference to id into %q", storeSpec+"@"+id) } else if id == "" { - logrus.Debugf("parsed reference into %q", storeSpec+refname) + logrus.Debugf("parsed reference to refname into %q", storeSpec+refname) } else { - logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id) + logrus.Debugf("parsed reference to refname@id into %q", storeSpec+refname+"@"+id) } - return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil + return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil } func (s *storageTransport) GetStore() (storage.Store, error) { @@ -182,11 +251,14 @@ func (s *storageTransport) GetStore() (storage.Store, error) { return s.store, nil } -// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"), +// ParseReference takes a name and a tag or digest and/or ID +// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"), // possibly prefixed with a store specifier in the form "[_graphroot_]" or // "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or // "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", // tries to figure out which it is, and returns it in a reference object. +// If _id_ is the ID of an image that's present in local storage, it can be truncated, and +// even be specified as if it were a _name_, value. func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { var store storage.Store // Check if there's a store location prefix. 
If there is, then it @@ -265,17 +337,23 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { dref := ref.DockerReference() - if dref == nil { - if sref, ok := ref.(*storageReference); ok { - if sref.id != "" { - if img, err := store.Image(sref.id); err == nil { - return img, nil - } + if dref != nil { + if img, err := store.Image(verboseName(dref)); err == nil { + return img, nil + } + } + if sref, ok := ref.(*storageReference); ok { + if sref.id != "" { + if img, err := store.Image(sref.id); err == nil { + return img, nil } } - return nil, ErrInvalidReference + tmpRef := *sref + if img, err := tmpRef.resolveImage(); err == nil { + return img, nil + } } - return store.Image(verboseName(dref)) + return nil, storage.ErrImageUnknown } func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { @@ -335,7 +413,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { if err != nil { return err } - _, err = ddigest.Parse("sha256:" + scopeInfo[1]) + _, err = digest.Parse("sha256:" + scopeInfo[1]) if err != nil { return err } @@ -345,11 +423,28 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { return nil } -func verboseName(name reference.Named) string { - name = reference.TagNameOnly(name) - tag := "" - if tagged, ok := name.(reference.NamedTagged); ok { - tag = ":" + tagged.Tag() +func verboseName(r reference.Reference) string { + if r == nil { + return "" } - return name.Name() + tag + named, isNamed := r.(reference.Named) + digested, isDigested := r.(reference.Digested) + tagged, isTagged := r.(reference.Tagged) + name := "" + tag := "" + sum := "" + if isNamed { + name = (reference.TrimNamed(named)).String() + } + if isTagged { + if tagged.Tag() != "" { + tag = ":" + tagged.Tag() + } + } + if isDigested { + if digested.Digest().Validate() == nil { + sum = "@" + digested.Digest().String() + } + } + return name + tag + sum } diff --git a/vendor/github.com/containers/image/tarball/doc.go b/vendor/github.com/containers/image/tarball/doc.go new file mode 100644 index 00000000..a6ced5a0 --- /dev/null +++ b/vendor/github.com/containers/image/tarball/doc.go @@ -0,0 +1,48 @@ +// Package tarball provides a way to generate images using one or more layer +// tarballs and an optional template configuration. 
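+// Layer digests, sizes, and the image configuration are computed on the fly
+// when a reference is opened as an image source.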
+//
+// An example:
+//	package main
+//
+//	import (
+//		"fmt"
+//
+//		cp "github.com/containers/image/copy"
+//		"github.com/containers/image/tarball"
+//		"github.com/containers/image/transports/alltransports"
+//
+//		imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+//	)
+//
+//	func imageFromTarball() {
+//		src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+//		// - or -
+//		// src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+//		if err != nil {
+//			panic(err)
+//		}
+//		updater, ok := src.(tarball.ConfigUpdater)
+//		if !ok {
+//			panic("unexpected: a tarball reference should implement tarball.ConfigUpdater")
+//		}
+//		config := imgspecv1.Image{
+//			Config: imgspecv1.ImageConfig{
+//				Cmd: []string{"/bin/bash"},
+//			},
+//		}
+//		annotations := make(map[string]string)
+//		annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache"
+//		err = updater.ConfigUpdate(config, annotations)
+//		if err != nil {
+//			panic(err)
+//		}
+//		dest, err := alltransports.ParseImageName("docker-daemon:mock:latest")
+//		if err != nil {
+//			panic(err)
+//		}
+//		err = cp.Image(nil, dest, src, nil)
+//		if err != nil {
+//			panic(err)
+//		}
+//	}
+package tarball
diff --git a/vendor/github.com/containers/image/tarball/tarball_reference.go b/vendor/github.com/containers/image/tarball/tarball_reference.go
new file mode 100644
index 00000000..4ccfb406
--- /dev/null
+++ b/vendor/github.com/containers/image/tarball/tarball_reference.go
@@ -0,0 +1,93 @@
+package tarball
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/image"
+	"github.com/containers/image/types"
+
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ConfigUpdater is an interface that ImageReferences for "tarball" images also
+// implement. It can be used to set values for a configuration, and to set
+// image annotations which will be present in the images returned by the
+// reference's NewImage() or NewImageSource() methods.
+type ConfigUpdater interface {
+	ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error
+}
+
+type tarballReference struct {
+	transport   types.ImageTransport
+	config      imgspecv1.Image
+	annotations map[string]string
+	filenames   []string
+	stdin       []byte
+}
+
+// ConfigUpdate updates the image's default configuration and adds annotations
+// which will be visible in source images created using this reference.
+func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error {
+	r.config = config
+	if r.annotations == nil {
+		r.annotations = make(map[string]string)
+	}
+	for k, v := range annotations {
+		r.annotations[k] = v
+	}
+	return nil
+}
+
+func (r *tarballReference) Transport() types.ImageTransport {
+	return r.transport
+}
+
+func (r *tarballReference) StringWithinTransport() string {
+	return strings.Join(r.filenames, ":")
+}
+
+func (r *tarballReference) DockerReference() reference.Named {
+	return nil
+}
+
+func (r *tarballReference) PolicyConfigurationIdentity() string {
+	return ""
+}
+
+func (r *tarballReference) PolicyConfigurationNamespaces() []string {
+	return nil
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (r *tarballReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { + src, err := r.NewImageSource(ctx) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, src) + if err != nil { + src.Close() + return nil, err + } + return img, nil +} + +func (r *tarballReference) DeleteImage(ctx *types.SystemContext) error { + for _, filename := range r.filenames { + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error removing %q: %v", filename, err) + } + } + return nil +} + +func (r *tarballReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { + return nil, fmt.Errorf("destination not implemented yet") +} diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go new file mode 100644 index 00000000..8b5b496d --- /dev/null +++ b/vendor/github.com/containers/image/tarball/tarball_src.go @@ -0,0 +1,260 @@ +package tarball + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "github.com/containers/image/types" + + digest "github.com/opencontainers/go-digest" + imgspecs "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type tarballImageSource struct { + reference tarballReference + filenames []string + diffIDs []digest.Digest + diffSizes []int64 + blobIDs []digest.Digest + blobSizes []int64 + blobTypes []string + config []byte + configID digest.Digest + configSize int64 + manifest []byte +} + +func (r *tarballReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { + // Gather up the digests, sizes, and date information for all of the files. + filenames := []string{} + diffIDs := []digest.Digest{} + diffSizes := []int64{} + blobIDs := []digest.Digest{} + blobSizes := []int64{} + blobTimes := []time.Time{} + blobTypes := []string{} + for _, filename := range r.filenames { + var file *os.File + var err error + var blobSize int64 + var blobTime time.Time + var reader io.Reader + if filename == "-" { + blobSize = int64(len(r.stdin)) + blobTime = time.Now() + reader = bytes.NewReader(r.stdin) + } else { + file, err = os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) + } + defer file.Close() + reader = file + fileinfo, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("error reading size of %q: %v", filename, err) + } + blobSize = fileinfo.Size() + blobTime = fileinfo.ModTime() + } + + // Default to assuming the layer is compressed. + layerType := imgspecv1.MediaTypeImageLayerGzip + + // Set up to digest the file as it is. + blobIDdigester := digest.Canonical.Digester() + reader = io.TeeReader(reader, blobIDdigester.Hash()) + + // Set up to digest the file after we maybe decompress it. 
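+		// gzip.NewReader doubles as the compression check: if it succeeds, the
+		// diffID digester reads the decompressed bytes; if it fails, the blob is
+		// treated as uncompressed and the blob digester is reused as the diffID.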
+ diffIDdigester := digest.Canonical.Digester() + uncompressed, err := gzip.NewReader(reader) + if err == nil { + // It is compressed, so the diffID is the digest of the uncompressed version + reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) + } else { + // It is not compressed, so the diffID and the blobID are going to be the same + diffIDdigester = blobIDdigester + layerType = imgspecv1.MediaTypeImageLayer + uncompressed = nil + } + n, err := io.Copy(ioutil.Discard, reader) + if err != nil { + return nil, fmt.Errorf("error reading %q: %v", filename, err) + } + if uncompressed != nil { + uncompressed.Close() + } + + // Grab our uncompressed and possibly-compressed digests and sizes. + filenames = append(filenames, filename) + diffIDs = append(diffIDs, diffIDdigester.Digest()) + diffSizes = append(diffSizes, n) + blobIDs = append(blobIDs, blobIDdigester.Digest()) + blobSizes = append(blobSizes, blobSize) + blobTimes = append(blobTimes, blobTime) + blobTypes = append(blobTypes, layerType) + } + + // Build the rootfs and history for the configuration blob. + rootfs := imgspecv1.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + created := time.Time{} + history := []imgspecv1.History{} + // Pick up the layer comment from the configuration's history list, if one is set. + comment := "imported from tarball" + if len(r.config.History) > 0 && r.config.History[0].Comment != "" { + comment = r.config.History[0].Comment + } + for i := range diffIDs { + createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) + history = append(history, imgspecv1.History{ + Created: &blobTimes[i], + CreatedBy: createdBy, + Comment: comment, + }) + // Use the mtime of the most recently modified file as the image's creation time. + if created.Before(blobTimes[i]) { + created = blobTimes[i] + } + } + + // Pick up other defaults from the config in the reference. + config := r.config + if config.Created == nil { + config.Created = &created + } + if config.Architecture == "" { + config.Architecture = runtime.GOARCH + } + if config.OS == "" { + config.OS = runtime.GOOS + } + config.RootFS = rootfs + config.History = history + + // Encode and digest the image configuration blob. + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) + } + configID := digest.Canonical.FromBytes(configBytes) + configSize := int64(len(configBytes)) + + // Populate a manifest with the configuration blob and the file as the single layer. + layerDescriptors := []imgspecv1.Descriptor{} + for i := range blobIDs { + layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ + Digest: blobIDs[i], + Size: blobSizes[i], + MediaType: blobTypes[i], + }) + } + annotations := make(map[string]string) + for k, v := range r.annotations { + annotations[k] = v + } + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{ + SchemaVersion: 2, + }, + Config: imgspecv1.Descriptor{ + Digest: configID, + Size: configSize, + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: layerDescriptors, + Annotations: annotations, + } + + // Encode the manifest. + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) + } + + // Return the image. 
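+	// The slices below are parallel: index i always describes the same layer
+	// in filenames, diffIDs, diffSizes, blobIDs, blobSizes, and blobTypes.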
+ src := &tarballImageSource{ + reference: *r, + filenames: filenames, + diffIDs: diffIDs, + diffSizes: diffSizes, + blobIDs: blobIDs, + blobSizes: blobSizes, + blobTypes: blobTypes, + config: configBytes, + configID: configID, + configSize: configSize, + manifest: manifestBytes, + } + + return src, nil +} + +func (is *tarballImageSource) Close() error { + return nil +} + +func (is *tarballImageSource) GetBlob(blobinfo types.BlobInfo) (io.ReadCloser, int64, error) { + // We should only be asked about things in the manifest. Maybe the configuration blob. + if blobinfo.Digest == is.configID { + return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + } + // Maybe one of the layer blobs. + for i := range is.blobIDs { + if blobinfo.Digest == is.blobIDs[i] { + // We want to read that layer: open the file or memory block and hand it back. + if is.filenames[i] == "-" { + return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + } + reader, err := os.Open(is.filenames[i]) + if err != nil { + return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) + } + return reader, is.blobSizes[i], nil + } + } + return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (is *tarballImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return is.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return nil, nil +} + +func (is *tarballImageSource) Reference() types.ImageReference { + return &is.reference +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
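+// The tarball transport generates its manifest locally, so the manifest's
+// values are already authoritative and nil is returned here.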
+func (*tarballImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/tarball/tarball_transport.go b/vendor/github.com/containers/image/tarball/tarball_transport.go new file mode 100644 index 00000000..72558b5e --- /dev/null +++ b/vendor/github.com/containers/image/tarball/tarball_transport.go @@ -0,0 +1,66 @@ +package tarball + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/containers/image/transports" + "github.com/containers/image/types" +) + +const ( + transportName = "tarball" + separator = ":" +) + +var ( + // Transport implements the types.ImageTransport interface for "tarball:" images, + // which are makeshift images constructed using one or more possibly-compressed tar + // archives. + Transport = &tarballTransport{} +) + +type tarballTransport struct { +} + +func (t *tarballTransport) Name() string { + return transportName +} + +func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { + var stdin []byte + var err error + filenames := strings.Split(reference, separator) + for _, filename := range filenames { + if filename == "-" { + stdin, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("error buffering stdin: %v", err) + } + continue + } + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q: %v", filename, err) + } + f.Close() + } + ref := &tarballReference{ + transport: t, + filenames: filenames, + stdin: stdin, + } + return ref, nil +} + +func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + return errors.New(`tarball: does not support any scopes except the default "" one`) +} + +func init() { + transports.Register(Transport) +} diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/transports/alltransports/alltransports.go index 4279b9d2..b4552df6 100644 --- a/vendor/github.com/containers/image/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/transports/alltransports/alltransports.go @@ -13,8 +13,9 @@ import ( _ "github.com/containers/image/oci/archive" _ "github.com/containers/image/oci/layout" _ "github.com/containers/image/openshift" + _ "github.com/containers/image/tarball" // The ostree transport is registered by ostree*.go - _ "github.com/containers/image/storage" + // The storage transport is registered by storage*.go "github.com/containers/image/transports" "github.com/containers/image/types" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/image/transports/alltransports/storage.go b/vendor/github.com/containers/image/transports/alltransports/storage.go new file mode 100644 index 00000000..a867c664 --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/storage.go @@ -0,0 +1,8 @@ +// +build !containers_image_storage_stub + +package alltransports + +import ( + // Register the storage transport + _ "github.com/containers/image/storage" +) diff --git a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go new file mode 100644 index 00000000..4ac684e5 --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go @@ -0,0 +1,9 @@ +// +build containers_image_storage_stub + +package alltransports + +import 
"github.com/containers/image/transports" + +func init() { + transports.Register(transports.NewStubTransport("containers-storage")) +} diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index 4ede907b..2e9c7105 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -73,11 +73,12 @@ type ImageReference interface { // and each following element to be a prefix of the element preceding it. PolicyConfigurationNamespaces() []string - // NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. - // The caller must call .Close() on the returned Image. + // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. + // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. - NewImage(ctx *SystemContext) (Image, error) + // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. + NewImage(ctx *SystemContext) (ImageCloser, error) // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. NewImageSource(ctx *SystemContext) (ImageSource, error) @@ -96,9 +97,10 @@ type BlobInfo struct { Size int64 // -1 if unknown URLs []string Annotations map[string]string + MediaType string } -// ImageSource is a service, possibly remote (= slow), to download components of a single image. +// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list). // This is primarily useful for copying images around; for examining their properties, Image (below) // is usually more useful. // Each ImageSource should eventually be closed by calling Close(). @@ -113,15 +115,21 @@ type ImageSource interface { Close() error // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. - GetManifest() ([]byte, string, error) - // GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest - // out of a manifest list. - GetTargetManifest(digest digest.Digest) ([]byte, string, error) + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); + // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). - // The Digest field in BlobInfo is guaranteed to be provided; Size may be -1. + // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. GetBlob(BlobInfo) (io.ReadCloser, int64, error) // GetSignatures returns the image's signatures. It may use a remote (= slow) service. 
- GetSignatures(context.Context) ([][]byte, error) + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy() []BlobInfo } // ImageDestination is a service, possibly remote (= slow), to store components of a single image. @@ -153,9 +161,10 @@ type ImageDestination interface { AcceptsForeignLayerURLs() bool // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. MustMatchRuntimeOS() bool - // PutBlob writes contents of stream and returns data representing the result (with all data filled in). + // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. + // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. @@ -194,28 +203,35 @@ func (e ManifestTypeRejectedError) Error() string { // Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them, // allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else. // This also makes the UnparsedImage→Image conversion an explicitly visible step. -// Each UnparsedImage should eventually be closed by calling Close(). +// +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. type UnparsedImage interface { // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. Reference() ImageReference - // Close removes resources associated with an initialized UnparsedImage, if any. - Close() error // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. Manifest() ([]byte, string, error) // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. Signatures(ctx context.Context) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. 
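+ // (For example, a transport that stores layers uncompressed may report digests and sizes here that differ from the values recorded in the manifest.)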
+ // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy() []BlobInfo } // Image is the primary API for inspecting properties of images. -// Each Image should eventually be closed by calling Close(). +// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// The Image must not be used after the underlying ImageSource is Close()d. type Image interface { // Note that Reference may return nil in the return value of UpdatedImage! UnparsedImage // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. ConfigInfo() BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. + // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise. // The result is cached; it is OK to call this however often you need. ConfigBlob() ([]byte, error) // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about @@ -223,7 +239,7 @@ type Image interface { // old image manifests work (docker v2s1 especially). OCIConfig() (*v1.Image, error) // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. + // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // WARNING: The list may contain duplicates, and they are semantically relevant. LayerInfos() []BlobInfo // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -240,16 +256,23 @@ type Image interface { // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired. // This does not change the state of the original Image object. UpdatedImage(options ManifestUpdateOptions) (Image, error) - // IsMultiImage returns true if the image's manifest is a list of images, false otherwise. - IsMultiImage() bool // Size returns an approximation of the amount of disk space which is consumed by the image in its current // location. If the size is not known, -1 will be returned. Size() (int64, error) } +// ImageCloser is an Image with a Close() method which must be called by the user. +// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource, +// to ensure that the ImageSource is closed. +type ImageCloser interface { + Image + // Close removes resources associated with an initialized ImageCloser. + Close() error +} + // ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest type ManifestUpdateOptions struct { - LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls) which should replace the originals, in order (the root layer first, and then successive layered layers) + LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. 
EmbeddedDockerReference reference.Named ManifestMIMEType string // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. @@ -283,7 +306,7 @@ type DockerAuthConfig struct { Password string } -// SystemContext allows parametrizing access to implicitly-accessed resources, +// SystemContext allows parameterizing access to implicitly-accessed resources, // like configuration files in /etc and users' login state in their home directory. // Various components can share the same field only if their semantics is exactly // the same; if in doubt, add a new field. @@ -306,6 +329,10 @@ type SystemContext struct { SystemRegistriesConfPath string // If not "", overrides the default path for the authentication file AuthFilePath string + // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match. + ArchitectureChoice string + // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. + OSChoice string // === OCI.Transport overrides === // If not "", a directory containing a CA certificate (ending with ".crt"), @@ -314,6 +341,8 @@ type SystemContext struct { OCICertPath string // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. OCIInsecureSkipTLSVerify bool + // If not "", use a shared directory for storing blobs rather than within OCI layouts + OCISharedBlobDirPath string // === docker.Transport overrides === // If not "", a directory containing a CA certificate (ending with ".crt"), @@ -322,8 +351,9 @@ type SystemContext struct { DockerCertPath string // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above. // Ignored if DockerCertPath is non-empty. - DockerPerHostCertDirPath string - DockerInsecureSkipTLSVerify bool // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + DockerPerHostCertDirPath string + // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + DockerInsecureSkipTLSVerify bool // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials DockerAuthConfig *DockerAuthConfig // if not "", an User-Agent header is added to each request when contacting a registry. @@ -334,6 +364,20 @@ type SystemContext struct { DockerDisableV1Ping bool // Directory to use for OSTree temporary files OSTreeTmpDirPath string + + // === docker/daemon.Transport overrides === + // A directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when talking to a Docker daemon. + DockerDaemonCertPath string + // The hostname or IP to the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed. + DockerDaemonHost string + // Used to skip TLS verification, off by default. To take effect DockerDaemonCertPath needs to be specified as well. 
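+ // A hedged usage sketch (host and certificate path are hypothetical):
+ //   ctx := &types.SystemContext{
+ //       DockerDaemonHost:                  "tcp://build-host:2376",
+ //       DockerDaemonCertPath:              "/etc/containers/certs.d/build-host",
+ //       DockerDaemonInsecureSkipTLSVerify: true,
+ //   }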
+ DockerDaemonInsecureSkipTLSVerify bool + + // === dir.Transport overrides === + // DirForceCompress compresses the image layers if set to true + DirForceCompress bool } // ProgressProperties is used to pass information from the copy code to a monitor which diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index d5bae3b0..f3634b38 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -1,5 +1,5 @@ github.com/sirupsen/logrus v1.0.0 -github.com/containers/storage 47536c89fcc545a87745e1a1573addc439409165 +github.com/containers/storage master github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 @@ -22,7 +22,7 @@ github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302 github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 -github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721 +github.com/vbatts/tar-split v0.10.2 golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8 golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08 @@ -36,4 +36,5 @@ github.com/tchap/go-patricia v2.2.6 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0 github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 -github.com/gogo/protobuf/proto fcdc5011193ff531a548e9b0301828d5a5b97fd8 +github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8 +github.com/pquerna/ffjson master diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index c6eb9eaf..5631e31c 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -10,6 +10,8 @@ import ( "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" ) // A Container is a reference to a read-write layer with metadata. @@ -44,6 +46,10 @@ type Container struct { // that has been stored, if they're known. BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + // BigDataDigests maps the names in BigDataNames to the digests of the + // data that has been stored, if they're known. + BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` + // Created is the datestamp for when this container was created. 
Older // versions of the library did not track this information, so callers // will likely want to use the IsZero() method to verify that a value @@ -133,6 +139,7 @@ func (r *containerStore) Load() error { ids := make(map[string]*Container) names := make(map[string]*Container) if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(containers)) for n, container := range containers { idlist = append(idlist, container.ID) ids[container.ID] = containers[n] @@ -223,6 +230,9 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro if !ok { return ErrContainerUnknown } + if container.Flags == nil { + container.Flags = make(map[string]interface{}) + } container.Flags[flag] = value return r.Save() } @@ -247,15 +257,16 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat } if err == nil { container = &Container{ - ID: id, - Names: names, - ImageID: image, - LayerID: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - Created: time.Now().UTC(), - Flags: make(map[string]interface{}), + ID: id, + Names: names, + ImageID: image, + LayerID: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: time.Now().UTC(), + Flags: make(map[string]interface{}), } r.containers = append(r.containers, container) r.byid[id] = container @@ -362,6 +373,9 @@ func (r *containerStore) Exists(id string) bool { } func (r *containerStore) BigData(id, key string) ([]byte, error) { + if key == "" { + return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name") + } c, ok := r.lookup(id) if !ok { return nil, ErrContainerUnknown @@ -370,16 +384,61 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) { } func (r *containerStore) BigDataSize(id, key string) (int64, error) { + if key == "" { + return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name") + } c, ok := r.lookup(id) if !ok { return -1, ErrContainerUnknown } + if c.BigDataSizes == nil { + c.BigDataSizes = make(map[string]int64) + } if size, ok := c.BigDataSizes[key]; ok { return size, nil } + if data, err := r.BigData(id, key); err == nil && data != nil { + if r.SetBigData(id, key, data) == nil { + c, ok := r.lookup(id) + if !ok { + return -1, ErrContainerUnknown + } + if size, ok := c.BigDataSizes[key]; ok { + return size, nil + } + } + } return -1, ErrSizeUnknown } +func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { + if key == "" { + return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name") + } + c, ok := r.lookup(id) + if !ok { + return "", ErrContainerUnknown + } + if c.BigDataDigests == nil { + c.BigDataDigests = make(map[string]digest.Digest) + } + if d, ok := c.BigDataDigests[key]; ok { + return d, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + if r.SetBigData(id, key, data) == nil { + c, ok := r.lookup(id) + if !ok { + return "", ErrContainerUnknown + } + if d, ok := c.BigDataDigests[key]; ok { + return d, nil + } + } + } + return "", ErrDigestUnknown +} + func (r *containerStore) BigDataNames(id string) ([]string, error) { c, ok := r.lookup(id) if !ok { @@ -389,6 +448,9 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) { } func (r *containerStore) SetBigData(id, key 
string, data []byte) error { + if key == "" { + return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item") + } c, ok := r.lookup(id) if !ok { return ErrContainerUnknown @@ -399,19 +461,28 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error { err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600) if err == nil { save := false - oldSize, ok := c.BigDataSizes[key] + if c.BigDataSizes == nil { + c.BigDataSizes = make(map[string]int64) + } + oldSize, sizeOk := c.BigDataSizes[key] c.BigDataSizes[key] = int64(len(data)) - if !ok || oldSize != c.BigDataSizes[key] { + if c.BigDataDigests == nil { + c.BigDataDigests = make(map[string]digest.Digest) + } + oldDigest, digestOk := c.BigDataDigests[key] + newDigest := digest.Canonical.FromBytes(data) + c.BigDataDigests[key] = newDigest + if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest { save = true } - add := true + addName := true for _, name := range c.BigDataNames { if name == key { - add = false + addName = false break } } - if add { + if addName { c.BigDataNames = append(c.BigDataNames, key) save = true } @@ -423,7 +494,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error { } func (r *containerStore) Wipe() error { - ids := []string{} + ids := make([]string, 0, len(r.byid)) for id := range r.byid { ids = append(ids, id) } diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go new file mode 100644 index 00000000..95261980 --- /dev/null +++ b/vendor/github.com/containers/storage/containers_ffjson.go @@ -0,0 +1,1194 @@ +// Code generated by ffjson . DO NOT EDIT. +// source: containers.go + +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/opencontainers/go-digest" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +// MarshalJSON marshal bytes to json - template +func (j *Container) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *Container) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteByte(',') + if len(j.Names) != 0 { + buf.WriteString(`"names":`) + if j.Names != nil { + buf.WriteString(`[`) + for i, v := range j.Names { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.WriteString(`"image":`) + fflib.WriteJsonString(buf, string(j.ImageID)) + buf.WriteString(`,"layer":`) + fflib.WriteJsonString(buf, string(j.LayerID)) + buf.WriteByte(',') + if len(j.Metadata) != 0 { + buf.WriteString(`"metadata":`) + fflib.WriteJsonString(buf, string(j.Metadata)) + buf.WriteByte(',') + } + if len(j.BigDataNames) != 0 { + buf.WriteString(`"big-data-names":`) + if j.BigDataNames != nil { + buf.WriteString(`[`) + for i, v := range j.BigDataNames { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.BigDataSizes) != 0 { + if j.BigDataSizes == nil { + 
buf.WriteString(`"big-data-sizes":null`) + } else { + buf.WriteString(`"big-data-sizes":{ `) + for key, value := range j.BigDataSizes { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.FormatBits2(buf, uint64(value), 10, value < 0) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if len(j.BigDataDigests) != 0 { + if j.BigDataDigests == nil { + buf.WriteString(`"big-data-digests":null`) + } else { + buf.WriteString(`"big-data-digests":{ `) + for key, value := range j.BigDataDigests { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.WriteJsonString(buf, string(value)) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if true { + buf.WriteString(`"created":`) + + { + + obj, err = j.Created.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + if len(j.Flags) != 0 { + buf.WriteString(`"flags":`) + /* Falling back. type=map[string]interface {} kind=map */ + err = buf.Encode(j.Flags) + if err != nil { + return err + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffjtContainerbase = iota + ffjtContainernosuchkey + + ffjtContainerID + + ffjtContainerNames + + ffjtContainerImageID + + ffjtContainerLayerID + + ffjtContainerMetadata + + ffjtContainerBigDataNames + + ffjtContainerBigDataSizes + + ffjtContainerBigDataDigests + + ffjtContainerCreated + + ffjtContainerFlags +) + +var ffjKeyContainerID = []byte("id") + +var ffjKeyContainerNames = []byte("names") + +var ffjKeyContainerImageID = []byte("image") + +var ffjKeyContainerLayerID = []byte("layer") + +var ffjKeyContainerMetadata = []byte("metadata") + +var ffjKeyContainerBigDataNames = []byte("big-data-names") + +var ffjKeyContainerBigDataSizes = []byte("big-data-sizes") + +var ffjKeyContainerBigDataDigests = []byte("big-data-digests") + +var ffjKeyContainerCreated = []byte("created") + +var ffjKeyContainerFlags = []byte("flags") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *Container) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *Container) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtContainernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'b': + + if bytes.Equal(ffjKeyContainerBigDataNames, kn) { + currentKey = ffjtContainerBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyContainerBigDataSizes, kn) { + currentKey = ffjtContainerBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyContainerBigDataDigests, kn) { + currentKey = ffjtContainerBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffjKeyContainerCreated, kn) { + currentKey = ffjtContainerCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffjKeyContainerFlags, kn) { + currentKey = ffjtContainerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeyContainerID, kn) { + currentKey = ffjtContainerID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyContainerImageID, kn) { + currentKey = ffjtContainerImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffjKeyContainerLayerID, kn) { + currentKey = ffjtContainerLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffjKeyContainerMetadata, kn) { + currentKey = ffjtContainerMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffjKeyContainerNames, kn) { + currentKey = ffjtContainerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyContainerFlags, kn) { + currentKey = ffjtContainerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerCreated, kn) { + currentKey = ffjtContainerCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerBigDataDigests, kn) { + currentKey = ffjtContainerBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerBigDataSizes, kn) { + currentKey = ffjtContainerBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerBigDataNames, kn) { + currentKey = ffjtContainerBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerMetadata, kn) { + currentKey = ffjtContainerMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerLayerID, kn) { + currentKey = ffjtContainerLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerImageID, kn) { + currentKey = ffjtContainerImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerNames, kn) { + currentKey = ffjtContainerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerID, kn) { + currentKey = ffjtContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtContainernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || 
tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtContainerID: + goto handle_ID + + case ffjtContainerNames: + goto handle_Names + + case ffjtContainerImageID: + goto handle_ImageID + + case ffjtContainerLayerID: + goto handle_LayerID + + case ffjtContainerMetadata: + goto handle_Metadata + + case ffjtContainerBigDataNames: + goto handle_BigDataNames + + case ffjtContainerBigDataSizes: + goto handle_BigDataSizes + + case ffjtContainerBigDataDigests: + goto handle_BigDataDigests + + case ffjtContainerCreated: + goto handle_Created + + case ffjtContainerFlags: + goto handle_Flags + + case ffjtContainernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Names: + + /* handler: j.Names type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Names = nil + } else { + + j.Names = []string{} + + wantVal := true + + for { + + var tmpJNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJNames = string(string(outBuf)) + + } + } + + j.Names = append(j.Names, tmpJNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ImageID: + + /* handler: j.ImageID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ImageID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LayerID: + + /* handler: j.LayerID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.LayerID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Metadata: + + /* handler: j.Metadata type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Metadata = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataNames: + + /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataNames = nil + } else { + + j.BigDataNames = []string{} + + wantVal := true + + for { + + var tmpJBigDataNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJBigDataNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataNames = string(string(outBuf)) + + } + } + + j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataSizes: + + /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataSizes = nil + } else { + + j.BigDataSizes = make(map[string]int64, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataSizes int64 + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + tmpJBigDataSizes = int64(tval) + + } + } + + j.BigDataSizes[k] = tmpJBigDataSizes + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataDigests: + + /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataDigests = nil + } else { + + j.BigDataDigests = make(map[string]digest.Digest, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataDigests digest.Digest + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataDigests = digest.Digest(string(outBuf)) + + } + } + + j.BigDataDigests[k] = tmpJBigDataDigests + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Created: + + /* handler: j.Created type=time.Time kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + } else { + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = j.Created.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Flags: + + /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Flags = nil + } else { + + j.Flags = make(map[string]interface{}, 0) + + wantVal := true + + for { + + var k string + + var tmpJFlags interface{} + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ + + { + /* Falling back. 
type=interface {} kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJFlags) + if err != nil { + return fs.WrapErr(err) + } + } + + j.Flags[k] = tmpJFlags + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *containerStore) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *containerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtcontainerStorebase = iota + ffjtcontainerStorenosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *containerStore) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *containerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtcontainerStorebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtcontainerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtcontainerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtcontainerStorenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index e94947d7..48a1f078 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -94,7 +94,7 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err // are extracted from tar's with full second precision on modified time. // We need this hack here to make sure calls within same second receive // correct result. - time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second))) + time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) return err }), nil } diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index 2f6ea14f..2a096edf 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -8,6 +8,7 @@ import ( "os" "path" "path/filepath" + "syscall" "github.com/containers/storage/pkg/system" "github.com/pkg/errors" @@ -15,10 +16,11 @@ import ( "golang.org/x/sys/unix" ) -// hasOpaqueCopyUpBug checks whether the filesystem has a bug +// doesSupportNativeDiff checks whether the filesystem has a bug // which copies up the opaque flag when copying up an opaque -// directory. When this bug exists naive diff should be used. -func hasOpaqueCopyUpBug(d string) error { +// directory, or whether the kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled. +// In either case naive diff should be used.
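+// The check below mounts a scratch overlay in a temporary directory, removes and renames directories through the merged view, and inspects the trusted.overlay.opaque and trusted.overlay.redirect xattrs on the upper layer to decide whether the native diff path is safe.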
+func doesSupportNativeDiff(d string) error { td, err := ioutil.TempDir(d, "opaque-bug-check") if err != nil { return err @@ -29,10 +31,13 @@ func hasOpaqueCopyUpBug(d string) error { } }() - // Make directories l1/d, l2/d, l3, work, merged + // Make directories l1/d, l1/d1, l2/d, l3, work, merged if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { return err } + if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil { + return err + } if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { return err } @@ -75,5 +80,23 @@ func hasOpaqueCopyUpBug(d string) error { return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") } + // rename "d1" to "d2" + if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil { + // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled + if err.(*os.LinkError).Err == syscall.EXDEV { + return nil + } + return errors.Wrap(err, "failed to rename dir in merged directory") + } + // get the xattr of "d2" + xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect") + if err != nil { + return errors.Wrap(err, "failed to read redirect flag on upper layer") + } + + if string(xattrRedirect) == "d1" { + return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled") + } + return nil } diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go index 7e20c348..feb03959 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -49,7 +49,6 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e output := bytes.NewBuffer(nil) cmd.Stdout = output cmd.Stderr = output - if err := cmd.Start(); err != nil { w.Close() return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 29ec7b73..4974a94e 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -3,7 +3,6 @@ package overlay import ( - "bufio" "fmt" "io" "io/ioutil" @@ -26,7 +25,6 @@ import ( "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/parsers/kernel" "github.com/containers/storage/pkg/system" units "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" @@ -124,22 +122,6 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } - if err := supportsOverlay(); err != nil { - return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs") - } - - // require kernel 4.0.0 to ensure multiple lower dirs are supported - v, err := kernel.GetKernelVersion() - if err != nil { - return nil, err - } - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { - if !opts.overrideKernelCheck { - return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") - } - logrus.Warn("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update") - } - fsMagic, err := 
graphdriver.GetFSMagic(home) if err != nil { return nil, err @@ -153,22 +135,18 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: logrus.Errorf("'overlay' is not supported over %s", backingFs) return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs) - case graphdriver.FsMagicBtrfs: - // Support for OverlayFS on BTRFS was added in kernel 4.7 - // See https://btrfs.wiki.kernel.org/index.php/Changelog - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 { - if !opts.overrideKernelCheck { - logrus.Errorf("'overlay' requires kernel 4.7 to use on %s", backingFs) - return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' requires kernel 4.7 to use on %s", backingFs) - } - logrus.Warn("Using pre-4.7.0 kernel for overlay on btrfs, may require kernel update") - } } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return nil, err } + + supportsDType, err := supportsOverlay(home, fsMagic, rootUID, rootGID) + if err != nil { + return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs") + } + // Create the driver home dir if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return nil, err @@ -178,16 +156,6 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } - supportsDType, err := fsutils.SupportsDType(home) - if err != nil { - return nil, err - } - if !supportsDType { - logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) - // TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4 - // return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs) - } - d := &Driver{ name: "overlay", home: home, @@ -210,10 +178,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. - return nil, fmt.Errorf("Storage Option overlay.size only supported for backingFS XFS. Found %v", backingFs) + return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. 
Found %v", backingFs) } - logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) return d, nil } @@ -227,20 +195,20 @@ func parseOptions(options []string) (*overlayOptions, error) { } key = strings.ToLower(key) switch key { - case "overlay.override_kernel_check", "overlay2.override_kernel_check": - logrus.Debugf("overlay: overide_kernelcheck=%s", val) + case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check": + logrus.Debugf("overlay: override_kernelcheck=%s", val) o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } - case "overlay.size", "overlay2.size": + case ".size", "overlay.size", "overlay2.size": logrus.Debugf("overlay: size=%s", val) size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) - case "overlay.imagestore", "overlay2.imagestore": + case ".imagestore", "overlay.imagestore", "overlay2.imagestore": logrus.Debugf("overlay: imagestore=%s", val) // Additional read only image stores to use for lower paths for _, store := range strings.Split(val, ",") { @@ -264,31 +232,65 @@ func parseOptions(options []string) (*overlayOptions, error) { return o, nil } -func supportsOverlay() error { - // We can try to modprobe overlay first before looking at - // proc/filesystems for when overlay is supported +func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { + // We can try to modprobe overlay first exec.Command("modprobe", "overlay").Run() - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if s.Text() == "nodev\toverlay" { - return nil + layerDir, err := ioutil.TempDir(home, "compat") + if err == nil { + // Check if reading the directory's contents populates the d_type field, which is required + // for proper operation of the overlay filesystem. + supportsDType, err = fsutils.SupportsDType(layerDir) + if err != nil { + return false, err } + if !supportsDType { + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) + // TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4 + // return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs) + } + + // Try a test mount in the specific location we're looking at using. 
+ mergedDir := filepath.Join(layerDir, "merged") + lower1Dir := filepath.Join(layerDir, "lower1") + lower2Dir := filepath.Join(layerDir, "lower2") + defer func() { + // Permitted to fail, since the various subdirectories + // can be empty or not even there, and the home might + // legitimately be not empty + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + _ = os.RemoveAll(layerDir) + _ = os.Remove(home) + }() + _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID) + flags := fmt.Sprintf("lowerdir=%s:%s", lower1Dir, lower2Dir) + if len(flags) < unix.Getpagesize() { + if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil { + logrus.Debugf("overlay test mount with multiple lowers succeeded") + return supportsDType, nil + } + } + flags = fmt.Sprintf("lowerdir=%s", lower1Dir) + if len(flags) < unix.Getpagesize() { + if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil { + logrus.Errorf("overlay test mount with multiple lowers failed, but succeeded with a single lower") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") + } + } + logrus.Errorf("'overlay' is not supported over %s at %q", backingFs, home) + return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home) } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") - return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.") } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { - if err := hasOpaqueCopyUpBug(home); err != nil { - logrus.Warnf("Not using native diff for overlay: %v", err) + if err := doesSupportNativeDiff(home); err != nil { + logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err) useNaiveDiffOnly = true } }) @@ -650,12 +652,22 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return err + } mountpoint := path.Join(d.dir(id), "merged") if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - err := unix.Unmount(mountpoint, unix.MNT_DETACH) - if err != nil { + if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil { + // If no lower, we used the diff directory, so no work to do + if os.IsNotExist(err) { + return nil + } + return err + } + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } return nil diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index d9e24e2e..ae62207d 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/chrootarchive" @@ -25,13 +26,23 @@ func init() { // This sets the home directory for the driver and returns NaiveDiffDriver. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { d := &Driver{ - home: home, + homes: []string{home}, idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), } rootIDs := d.idMappings.RootPair() if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { return nil, err } + for _, option := range options { + if strings.HasPrefix(option, "vfs.imagestore=") { + d.homes = append(d.homes, strings.Split(option[15:], ",")...) + continue + } + if strings.HasPrefix(option, ".imagestore=") { + d.homes = append(d.homes, strings.Split(option[12:], ",")...) + continue + } + } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil } @@ -40,7 +51,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. // Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver type Driver struct { - home string + homes []string idMappings *idtools.IDMappings } @@ -98,7 +109,17 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { } func (d *Driver) dir(id string) string { - return filepath.Join(d.home, "dir", filepath.Base(id)) + for i, home := range d.homes { + if i > 0 { + home = filepath.Join(home, d.String()) + } + candidate := filepath.Join(home, "dir", filepath.Base(id)) + fi, err := os.Stat(candidate) + if err == nil && fi.IsDir() { + return candidate + } + } + return filepath.Join(d.homes[0], "dir", filepath.Base(id)) } // Remove deletes the content from the directory for a given id. 
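The multi-home `dir` lookup above is what lets the vfs driver consult additional read-only image stores: every home after the first is searched under a driver-named subdirectory, with the primary home as the final fallback. A minimal standalone sketch of that search order (package and names are illustrative, not part of the driver):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findDir mirrors the fallback search in the vfs driver's dir() method:
// secondary (read-only) image stores are consulted under a "vfs"
// subdirectory, and the primary home is returned when nothing matches.
func findDir(homes []string, id string) string {
	for i, home := range homes {
		if i > 0 {
			home = filepath.Join(home, "vfs")
		}
		candidate := filepath.Join(home, "dir", filepath.Base(id))
		if fi, err := os.Stat(candidate); err == nil && fi.IsDir() {
			return candidate
		}
	}
	return filepath.Join(homes[0], "dir", filepath.Base(id))
}

func main() {
	homes := []string{"/var/lib/mystorage", "/usr/share/ro-store"}
	fmt.Println(findDir(homes, "0123abcd"))
}
```

Returning the primary home on a miss means a subsequent create lands in the writable store, which is why `AdditionalImageStores` below only reports the homes past the first.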
@@ -132,5 +153,8 @@ func (d *Driver) Exists(id string) bool { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { + if len(d.homes) > 1 { + return d.homes[1:] + } return nil } diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go index 3242886f..bed6f8cd 100644 --- a/vendor/github.com/containers/storage/errors.go +++ b/vendor/github.com/containers/storage/errors.go @@ -49,4 +49,8 @@ var ( ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images") // ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers. ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers") + // ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty. + ErrInvalidBigDataName = errors.New("not a valid name for a big data item") + // ErrDigestUnknown indicates that we were unable to compute the digest of a specified item. + ErrDigestUnknown = errors.New("could not compute digest of item") ) diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index b2129b42..962e1bb7 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -10,15 +10,26 @@ import ( "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) +const ( + // ImageDigestBigDataKey is the name of the big data item whose + // contents we consider useful for computing a "digest" of the + // image, by which we can locate the image later. + ImageDigestBigDataKey = "manifest" +) + // An Image is a reference to a layer and an associated metadata string. type Image struct { // ID is either one which was specified at create-time, or a random // value which was generated by the library. ID string `json:"id"` + // Digest is a digest value that we can use to locate the image. + Digest digest.Digest `json:"digest,omitempty"` + // Names is an optional set of user-defined convenience values. The // image can be referred to by its ID or any of its names. Names are // unique among images. @@ -27,7 +38,7 @@ type Image struct { // TopLayer is the ID of the topmost layer of the image itself, if the // image contains one or more layers. Multiple images can refer to the // same top layer. - TopLayer string `json:"layer"` + TopLayer string `json:"layer,omitempty"` // Metadata is data we keep for the convenience of the caller. It is not // expected to be large, since it is kept in memory. @@ -42,6 +53,10 @@ type Image struct { // that has been stored, if they're known. BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + // BigDataDigests maps the names in BigDataNames to the digests of the + // data that has been stored, if they're known. + BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` + // Created is the datestamp for when this image was created. Older // versions of the library did not track this information, so callers // will likely want to use the IsZero() method to verify that a value @@ -69,6 +84,10 @@ type ROImageStore interface { // Images returns a slice enumerating the known images. 
Images() ([]Image, error) + + // Images returns a slice enumerating the images which have a big data + // item with the name ImageDigestBigDataKey and the specified digest. + ByDigest(d digest.Digest) ([]*Image, error) } // ImageStore provides bookkeeping for information about Images. @@ -82,7 +101,7 @@ type ImageStore interface { // Create creates an image that has a specified ID (or a random one) and // optional names, using the specified layer as its topmost (hopefully // read-only) layer. That layer can be referenced by multiple images. - Create(id string, names []string, layer, metadata string, created time.Time) (*Image, error) + Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) // SetNames replaces the list of names associated with an image with the // supplied values. @@ -102,6 +121,7 @@ type imageStore struct { idindex *truncindex.TruncIndex byid map[string]*Image byname map[string]*Image + bydigest map[digest.Digest][]*Image } func (r *imageStore) Images() ([]Image, error) { @@ -135,7 +155,9 @@ func (r *imageStore) Load() error { idlist := []string{} ids := make(map[string]*Image) names := make(map[string]*Image) + digests := make(map[digest.Digest][]*Image) if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(images)) for n, image := range images { ids[image.ID] = images[n] idlist = append(idlist, image.ID) @@ -146,6 +168,16 @@ func (r *imageStore) Load() error { } names[name] = images[n] } + // Implicit digest + if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok { + digests[digest] = append(digests[digest], images[n]) + } + // Explicit digest + if image.Digest == "" { + image.Digest = image.BigDataDigests[ImageDigestBigDataKey] + } else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] { + digests[image.Digest] = append(digests[image.Digest], images[n]) + } } } if shouldSave && !r.IsReadWrite() { @@ -155,6 +187,7 @@ func (r *imageStore) Load() error { r.idindex = truncindex.NewTruncIndex(idlist) r.byid = ids r.byname = names + r.bydigest = digests if shouldSave { return r.Save() } @@ -193,6 +226,7 @@ func newImageStore(dir string) (ImageStore, error) { images: []*Image{}, byid: make(map[string]*Image), byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), } if err := istore.Load(); err != nil { return nil, err @@ -213,6 +247,7 @@ func newROImageStore(dir string) (ROImageStore, error) { images: []*Image{}, byid: make(map[string]*Image), byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), } if err := istore.Load(); err != nil { return nil, err @@ -252,11 +287,14 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { if !ok { return ErrImageUnknown } + if image.Flags == nil { + image.Flags = make(map[string]interface{}) + } image.Flags[flag] = value return r.Save() } -func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time) (image *Image, err error) { +func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) { if !r.IsReadWrite() { return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath()) } @@ -282,18 +320,24 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c } if err == nil { image = &Image{ - ID: id, - Names: names, - TopLayer: layer, - 
Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - Created: created, - Flags: make(map[string]interface{}), + ID: id, + Digest: searchableDigest, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: created, + Flags: make(map[string]interface{}), } r.images = append(r.images, image) r.idindex.Add(id) r.byid[id] = image + if searchableDigest != "" { + list := r.bydigest[searchableDigest] + r.bydigest[searchableDigest] = append(list, image) + } for _, name := range names { r.byname[name] = image } @@ -373,6 +417,28 @@ func (r *imageStore) Delete(id string) error { r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) } } + if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok { + // remove the image from the digest-based index + if list, ok := r.bydigest[digest]; ok { + prunedList := imageSliceWithoutValue(list, image) + if len(prunedList) == 0 { + delete(r.bydigest, digest) + } else { + r.bydigest[digest] = prunedList + } + } + } + if image.Digest != "" { + // remove the image's hard-coded digest from the digest-based index + if list, ok := r.bydigest[image.Digest]; ok { + prunedList := imageSliceWithoutValue(list, image) + if len(prunedList) == 0 { + delete(r.bydigest, image.Digest) + } else { + r.bydigest[image.Digest] = prunedList + } + } + } if err := r.Save(); err != nil { return err } @@ -401,7 +467,17 @@ func (r *imageStore) Exists(id string) bool { return ok } +func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { + if images, ok := r.bydigest[d]; ok { + return images, nil + } + return nil, ErrImageUnknown +} + func (r *imageStore) BigData(id, key string) ([]byte, error) { + if key == "" { + return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name") + } image, ok := r.lookup(id) if !ok { return nil, ErrImageUnknown @@ -410,16 +486,61 @@ func (r *imageStore) BigData(id, key string) ([]byte, error) { } func (r *imageStore) BigDataSize(id, key string) (int64, error) { + if key == "" { + return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name") + } image, ok := r.lookup(id) if !ok { return -1, ErrImageUnknown } + if image.BigDataSizes == nil { + image.BigDataSizes = make(map[string]int64) + } if size, ok := image.BigDataSizes[key]; ok { return size, nil } + if data, err := r.BigData(id, key); err == nil && data != nil { + if r.SetBigData(id, key, data) == nil { + image, ok := r.lookup(id) + if !ok { + return -1, ErrImageUnknown + } + if size, ok := image.BigDataSizes[key]; ok { + return size, nil + } + } + } return -1, ErrSizeUnknown } +func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { + if key == "" { + return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name") + } + image, ok := r.lookup(id) + if !ok { + return "", ErrImageUnknown + } + if image.BigDataDigests == nil { + image.BigDataDigests = make(map[string]digest.Digest) + } + if d, ok := image.BigDataDigests[key]; ok { + return d, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + if r.SetBigData(id, key, data) == nil { + image, ok := r.lookup(id) + if !ok { + return "", ErrImageUnknown + } + if d, ok := image.BigDataDigests[key]; ok { + return d, nil + } + } + } + return "", ErrDigestUnknown +} + func (r *imageStore) 
BigDataNames(id string) ([]string, error) { image, ok := r.lookup(id) if !ok { @@ -428,7 +549,21 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) { return image.BigDataNames, nil } +func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { + modified := make([]*Image, 0, len(slice)) + for _, v := range slice { + if v == value { + continue + } + modified = append(modified, v) + } + return modified +} + func (r *imageStore) SetBigData(id, key string, data []byte) error { + if key == "" { + return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item") + } if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath()) } @@ -441,23 +576,55 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error { } err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600) if err == nil { - add := true save := false - oldSize, ok := image.BigDataSizes[key] + if image.BigDataSizes == nil { + image.BigDataSizes = make(map[string]int64) + } + oldSize, sizeOk := image.BigDataSizes[key] image.BigDataSizes[key] = int64(len(data)) - if !ok || oldSize != image.BigDataSizes[key] { + if image.BigDataDigests == nil { + image.BigDataDigests = make(map[string]digest.Digest) + } + oldDigest, digestOk := image.BigDataDigests[key] + newDigest := digest.Canonical.FromBytes(data) + image.BigDataDigests[key] = newDigest + if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest { save = true } + addName := true for _, name := range image.BigDataNames { if name == key { - add = false + addName = false break } } - if add { + if addName { image.BigDataNames = append(image.BigDataNames, key) save = true } + if key == ImageDigestBigDataKey { + if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest { + // remove the image from the list of images in the digest-based + // index which corresponds to the old digest for this item, unless + // it's also the hard-coded digest + if list, ok := r.bydigest[oldDigest]; ok { + prunedList := imageSliceWithoutValue(list, image) + if len(prunedList) == 0 { + delete(r.bydigest, oldDigest) + } else { + r.bydigest[oldDigest] = prunedList + } + } + } + // add the image to the list of images in the digest-based index which + // corresponds to the new digest for this item, unless it's already there + list := r.bydigest[newDigest] + if len(list) == len(imageSliceWithoutValue(list, image)) { + // the list isn't shortened by trying to prune this image from it, + // so it's not in there yet + r.bydigest[newDigest] = append(list, image) + } + } if save { err = r.Save() } @@ -469,7 +636,7 @@ func (r *imageStore) Wipe() error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) } - ids := []string{} + ids := make([]string, 0, len(r.byid)) for id := range r.byid { ids = append(ids, id) } diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go new file mode 100644 index 00000000..f6a8b065 --- /dev/null +++ b/vendor/github.com/containers/storage/images_ffjson.go @@ -0,0 +1,1202 @@ +// Code generated by ffjson . DO NOT EDIT. 
+// source: images.go + +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/opencontainers/go-digest" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +// MarshalJSON marshal bytes to json - template +func (j *Image) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteByte(',') + if len(j.Digest) != 0 { + buf.WriteString(`"digest":`) + fflib.WriteJsonString(buf, string(j.Digest)) + buf.WriteByte(',') + } + if len(j.Names) != 0 { + buf.WriteString(`"names":`) + if j.Names != nil { + buf.WriteString(`[`) + for i, v := range j.Names { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.TopLayer) != 0 { + buf.WriteString(`"layer":`) + fflib.WriteJsonString(buf, string(j.TopLayer)) + buf.WriteByte(',') + } + if len(j.Metadata) != 0 { + buf.WriteString(`"metadata":`) + fflib.WriteJsonString(buf, string(j.Metadata)) + buf.WriteByte(',') + } + if len(j.BigDataNames) != 0 { + buf.WriteString(`"big-data-names":`) + if j.BigDataNames != nil { + buf.WriteString(`[`) + for i, v := range j.BigDataNames { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.BigDataSizes) != 0 { + if j.BigDataSizes == nil { + buf.WriteString(`"big-data-sizes":null`) + } else { + buf.WriteString(`"big-data-sizes":{ `) + for key, value := range j.BigDataSizes { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.FormatBits2(buf, uint64(value), 10, value < 0) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if len(j.BigDataDigests) != 0 { + if j.BigDataDigests == nil { + buf.WriteString(`"big-data-digests":null`) + } else { + buf.WriteString(`"big-data-digests":{ `) + for key, value := range j.BigDataDigests { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.WriteJsonString(buf, string(value)) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if true { + buf.WriteString(`"created":`) + + { + + obj, err = j.Created.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + if len(j.Flags) != 0 { + buf.WriteString(`"flags":`) + /* Falling back. 
type=map[string]interface {} kind=map */ + err = buf.Encode(j.Flags) + if err != nil { + return err + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffjtImagebase = iota + ffjtImagenosuchkey + + ffjtImageID + + ffjtImageDigest + + ffjtImageNames + + ffjtImageTopLayer + + ffjtImageMetadata + + ffjtImageBigDataNames + + ffjtImageBigDataSizes + + ffjtImageBigDataDigests + + ffjtImageCreated + + ffjtImageFlags +) + +var ffjKeyImageID = []byte("id") + +var ffjKeyImageDigest = []byte("digest") + +var ffjKeyImageNames = []byte("names") + +var ffjKeyImageTopLayer = []byte("layer") + +var ffjKeyImageMetadata = []byte("metadata") + +var ffjKeyImageBigDataNames = []byte("big-data-names") + +var ffjKeyImageBigDataSizes = []byte("big-data-sizes") + +var ffjKeyImageBigDataDigests = []byte("big-data-digests") + +var ffjKeyImageCreated = []byte("created") + +var ffjKeyImageFlags = []byte("flags") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *Image) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *Image) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtImagebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
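+			// An empty key cannot match any field name, so it is
+			// routed to the "no such key" handler, which skips the
+			// following value rather than failing the parse.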
+ currentKey = ffjtImagenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'b': + + if bytes.Equal(ffjKeyImageBigDataNames, kn) { + currentKey = ffjtImageBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyImageBigDataSizes, kn) { + currentKey = ffjtImageBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyImageBigDataDigests, kn) { + currentKey = ffjtImageBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffjKeyImageCreated, kn) { + currentKey = ffjtImageCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffjKeyImageDigest, kn) { + currentKey = ffjtImageDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffjKeyImageFlags, kn) { + currentKey = ffjtImageFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeyImageID, kn) { + currentKey = ffjtImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffjKeyImageTopLayer, kn) { + currentKey = ffjtImageTopLayer + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffjKeyImageMetadata, kn) { + currentKey = ffjtImageMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffjKeyImageNames, kn) { + currentKey = ffjtImageNames + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyImageFlags, kn) { + currentKey = ffjtImageFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageCreated, kn) { + currentKey = ffjtImageCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageBigDataDigests, kn) { + currentKey = ffjtImageBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageBigDataSizes, kn) { + currentKey = ffjtImageBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageBigDataNames, kn) { + currentKey = ffjtImageBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageMetadata, kn) { + currentKey = ffjtImageMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageTopLayer, kn) { + currentKey = ffjtImageTopLayer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageNames, kn) { + currentKey = ffjtImageNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageDigest, kn) { + currentKey = ffjtImageDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageID, kn) { + currentKey = ffjtImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtImagenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == 
fflib.FFTok_null { + switch currentKey { + + case ffjtImageID: + goto handle_ID + + case ffjtImageDigest: + goto handle_Digest + + case ffjtImageNames: + goto handle_Names + + case ffjtImageTopLayer: + goto handle_TopLayer + + case ffjtImageMetadata: + goto handle_Metadata + + case ffjtImageBigDataNames: + goto handle_BigDataNames + + case ffjtImageBigDataSizes: + goto handle_BigDataSizes + + case ffjtImageBigDataDigests: + goto handle_BigDataDigests + + case ffjtImageCreated: + goto handle_Created + + case ffjtImageFlags: + goto handle_Flags + + case ffjtImagenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Digest: + + /* handler: j.Digest type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Digest = digest.Digest(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Names: + + /* handler: j.Names type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Names = nil + } else { + + j.Names = []string{} + + wantVal := true + + for { + + var tmpJNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJNames = string(string(outBuf)) + + } + } + + j.Names = append(j.Names, tmpJNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TopLayer: + + /* handler: j.TopLayer type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.TopLayer = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Metadata: + + /* handler: j.Metadata type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Metadata = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataNames: + + /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataNames = nil + } else { + + j.BigDataNames = []string{} + + wantVal := true + + for { + + var tmpJBigDataNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJBigDataNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataNames = string(string(outBuf)) + + } + } + + j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataSizes: + + /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataSizes = nil + } else { + + j.BigDataSizes = make(map[string]int64, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataSizes int64 + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + tmpJBigDataSizes = int64(tval) + + } + } + + j.BigDataSizes[k] = tmpJBigDataSizes + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataDigests: + + /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataDigests = nil + } else { + + j.BigDataDigests = make(map[string]digest.Digest, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataDigests digest.Digest + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataDigests = digest.Digest(string(outBuf)) + + } + } + + j.BigDataDigests[k] = tmpJBigDataDigests + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Created: + + /* handler: j.Created type=time.Time kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + } else { + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = j.Created.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Flags: + + /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Flags = nil + } else { + + j.Flags = make(map[string]interface{}, 0) + + wantVal := true + + for { + + var k string + + var tmpJFlags interface{} + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ + + { + /* Falling back. 
type=interface {} kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJFlags) + if err != nil { + return fs.WrapErr(err) + } + } + + j.Flags[k] = tmpJFlags + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *imageStore) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *imageStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtimageStorebase = iota + ffjtimageStorenosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *imageStore) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *imageStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtimageStorebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtimageStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtimageStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtimageStorenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 105875d0..f51406a0 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -254,6 +254,7 @@ func (r *layerStore) Load() error { compressedsums := make(map[digest.Digest][]string) uncompressedsums := make(map[digest.Digest][]string) if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(layers)) for n, layer := range layers { ids[layer.ID] = layers[n] idlist = append(idlist, layer.ID) @@ -305,6 +306,9 @@ func (r *layerStore) Load() error { // actually delete. if r.IsReadWrite() { for _, layer := range r.layers { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } if cleanup, ok := layer.Flags[incompleteFlag]; ok { if b, ok := cleanup.(bool); ok && b { err = r.Delete(layer.ID) @@ -338,7 +342,7 @@ func (r *layerStore) Save() error { if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { return err } - mounts := []layerMountPoint{} + mounts := make([]layerMountPoint, 0, len(r.layers)) for _, layer := range r.layers { if layer.MountPoint != "" && layer.MountCount > 0 { mounts = append(mounts, layerMountPoint{ @@ -455,6 +459,9 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { if !ok { return ErrLayerUnknown } + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } layer.Flags[flag] = value return r.Save() } @@ -733,7 +740,7 @@ func (r *layerStore) Wipe() error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) } - ids := []string{} + ids := make([]string, 0, len(r.byid)) for id := range r.byid { ids = append(ids, id) } diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go new file mode 100644 index 00000000..8bec40e1 --- /dev/null +++ b/vendor/github.com/containers/storage/layers_ffjson.go @@ -0,0 +1,1713 @@ +// Code generated by ffjson . DO NOT EDIT. 
+// source: layers.go + +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/containers/storage/pkg/archive" + "github.com/opencontainers/go-digest" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +// MarshalJSON marshal bytes to json - template +func (j *DiffOptions) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *DiffOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + if j.Compression != nil { + buf.WriteString(`{"Compression":`) + fflib.FormatBits2(buf, uint64(*j.Compression), 10, *j.Compression < 0) + } else { + buf.WriteString(`{"Compression":null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffjtDiffOptionsbase = iota + ffjtDiffOptionsnosuchkey + + ffjtDiffOptionsCompression +) + +var ffjKeyDiffOptionsCompression = []byte("Compression") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *DiffOptions) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *DiffOptions) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtDiffOptionsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtDiffOptionsnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'C': + + if bytes.Equal(ffjKeyDiffOptionsCompression, kn) { + currentKey = ffjtDiffOptionsCompression + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyDiffOptionsCompression, kn) { + currentKey = ffjtDiffOptionsCompression + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtDiffOptionsnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtDiffOptionsCompression: + goto handle_Compression + + case ffjtDiffOptionsnosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Compression: + + /* handler: j.Compression type=archive.Compression kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + j.Compression = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := archive.Compression(tval) + j.Compression = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *Layer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *Layer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteByte(',') + if len(j.Names) != 0 { + buf.WriteString(`"names":`) + if j.Names != nil { + buf.WriteString(`[`) + for i, v := range j.Names { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.Parent) != 0 { + buf.WriteString(`"parent":`) + fflib.WriteJsonString(buf, string(j.Parent)) + buf.WriteByte(',') + } + if len(j.Metadata) != 0 { + buf.WriteString(`"metadata":`) + fflib.WriteJsonString(buf, string(j.Metadata)) + buf.WriteByte(',') + } + if len(j.MountLabel) != 0 { + 
buf.WriteString(`"mountlabel":`) + fflib.WriteJsonString(buf, string(j.MountLabel)) + buf.WriteByte(',') + } + if true { + buf.WriteString(`"created":`) + + { + + obj, err = j.Created.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + if len(j.CompressedDigest) != 0 { + buf.WriteString(`"compressed-diff-digest":`) + fflib.WriteJsonString(buf, string(j.CompressedDigest)) + buf.WriteByte(',') + } + if j.CompressedSize != 0 { + buf.WriteString(`"compressed-size":`) + fflib.FormatBits2(buf, uint64(j.CompressedSize), 10, j.CompressedSize < 0) + buf.WriteByte(',') + } + if len(j.UncompressedDigest) != 0 { + buf.WriteString(`"diff-digest":`) + fflib.WriteJsonString(buf, string(j.UncompressedDigest)) + buf.WriteByte(',') + } + if j.UncompressedSize != 0 { + buf.WriteString(`"diff-size":`) + fflib.FormatBits2(buf, uint64(j.UncompressedSize), 10, j.UncompressedSize < 0) + buf.WriteByte(',') + } + if j.CompressionType != 0 { + buf.WriteString(`"compression":`) + fflib.FormatBits2(buf, uint64(j.CompressionType), 10, j.CompressionType < 0) + buf.WriteByte(',') + } + if len(j.Flags) != 0 { + buf.WriteString(`"flags":`) + /* Falling back. type=map[string]interface {} kind=map */ + err = buf.Encode(j.Flags) + if err != nil { + return err + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffjtLayerbase = iota + ffjtLayernosuchkey + + ffjtLayerID + + ffjtLayerNames + + ffjtLayerParent + + ffjtLayerMetadata + + ffjtLayerMountLabel + + ffjtLayerCreated + + ffjtLayerCompressedDigest + + ffjtLayerCompressedSize + + ffjtLayerUncompressedDigest + + ffjtLayerUncompressedSize + + ffjtLayerCompressionType + + ffjtLayerFlags +) + +var ffjKeyLayerID = []byte("id") + +var ffjKeyLayerNames = []byte("names") + +var ffjKeyLayerParent = []byte("parent") + +var ffjKeyLayerMetadata = []byte("metadata") + +var ffjKeyLayerMountLabel = []byte("mountlabel") + +var ffjKeyLayerCreated = []byte("created") + +var ffjKeyLayerCompressedDigest = []byte("compressed-diff-digest") + +var ffjKeyLayerCompressedSize = []byte("compressed-size") + +var ffjKeyLayerUncompressedDigest = []byte("diff-digest") + +var ffjKeyLayerUncompressedSize = []byte("diff-size") + +var ffjKeyLayerCompressionType = []byte("compression") + +var ffjKeyLayerFlags = []byte("flags") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *Layer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *Layer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtLayerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffjtLayernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffjKeyLayerCreated, kn) { + currentKey = ffjtLayerCreated + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerCompressedDigest, kn) { + currentKey = ffjtLayerCompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerCompressedSize, kn) { + currentKey = ffjtLayerCompressedSize + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerCompressionType, kn) { + currentKey = ffjtLayerCompressionType + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffjKeyLayerUncompressedDigest, kn) { + currentKey = ffjtLayerUncompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerUncompressedSize, kn) { + currentKey = ffjtLayerUncompressedSize + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffjKeyLayerFlags, kn) { + currentKey = ffjtLayerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeyLayerID, kn) { + currentKey = ffjtLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffjKeyLayerMetadata, kn) { + currentKey = ffjtLayerMetadata + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerMountLabel, kn) { + currentKey = ffjtLayerMountLabel + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffjKeyLayerNames, kn) { + currentKey = ffjtLayerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffjKeyLayerParent, kn) { + currentKey = ffjtLayerParent + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyLayerFlags, kn) { + currentKey = ffjtLayerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerCompressionType, kn) { + currentKey = ffjtLayerCompressionType + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerUncompressedSize, kn) { + currentKey = ffjtLayerUncompressedSize + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerUncompressedDigest, kn) { + currentKey = ffjtLayerUncompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerCompressedSize, kn) { + currentKey = ffjtLayerCompressedSize + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerCompressedDigest, kn) { + currentKey = ffjtLayerCompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerCreated, kn) { + currentKey = ffjtLayerCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerMountLabel, kn) { + currentKey = ffjtLayerMountLabel + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerMetadata, kn) { + currentKey = ffjtLayerMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.SimpleLetterEqualFold(ffjKeyLayerParent, kn) { + currentKey = ffjtLayerParent + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerNames, kn) { + currentKey = ffjtLayerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerID, kn) { + currentKey = ffjtLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtLayernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtLayerID: + goto handle_ID + + case ffjtLayerNames: + goto handle_Names + + case ffjtLayerParent: + goto handle_Parent + + case ffjtLayerMetadata: + goto handle_Metadata + + case ffjtLayerMountLabel: + goto handle_MountLabel + + case ffjtLayerCreated: + goto handle_Created + + case ffjtLayerCompressedDigest: + goto handle_CompressedDigest + + case ffjtLayerCompressedSize: + goto handle_CompressedSize + + case ffjtLayerUncompressedDigest: + goto handle_UncompressedDigest + + case ffjtLayerUncompressedSize: + goto handle_UncompressedSize + + case ffjtLayerCompressionType: + goto handle_CompressionType + + case ffjtLayerFlags: + goto handle_Flags + + case ffjtLayernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Names: + + /* handler: j.Names type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Names = nil + } else { + + j.Names = []string{} + + wantVal := true + + for { + + var tmpJNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJNames = string(string(outBuf)) + + } + } + + j.Names = append(j.Names, tmpJNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Parent: + + /* handler: j.Parent type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Parent = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Metadata: + + /* handler: j.Metadata type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Metadata = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MountLabel: + + /* handler: j.MountLabel type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.MountLabel = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Created: + + /* handler: j.Created type=time.Time kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + } else { + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = j.Created.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompressedDigest: + + /* handler: j.CompressedDigest type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.CompressedDigest = digest.Digest(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompressedSize: + + /* handler: j.CompressedSize type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.CompressedSize = int64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UncompressedDigest: + + /* handler: j.UncompressedDigest type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.UncompressedDigest = digest.Digest(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UncompressedSize: + + /* handler: j.UncompressedSize type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.UncompressedSize = int64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompressionType: + + /* handler: j.CompressionType type=archive.Compression kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.CompressionType = archive.Compression(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Flags: + + /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Flags = nil + } else { + + j.Flags = make(map[string]interface{}, 0) + + wantVal := true + + for { + + var k string + + var tmpJFlags interface{} + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ + + { + /* Falling back. type=interface {} kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJFlags) + if err != nil { + return fs.WrapErr(err) + } + } + + j.Flags[k] = tmpJFlags + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *layerMountPoint) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *layerMountPoint) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteString(`,"path":`) + fflib.WriteJsonString(buf, string(j.MountPoint)) + buf.WriteString(`,"count":`) + fflib.FormatBits2(buf, uint64(j.MountCount), 10, j.MountCount < 0) + buf.WriteByte('}') + return nil +} + +const ( + ffjtlayerMountPointbase = iota + ffjtlayerMountPointnosuchkey + + ffjtlayerMountPointID + + ffjtlayerMountPointMountPoint + + ffjtlayerMountPointMountCount +) + +var ffjKeylayerMountPointID = []byte("id") + +var ffjKeylayerMountPointMountPoint = []byte("path") + +var ffjKeylayerMountPointMountCount = []byte("count") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *layerMountPoint) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *layerMountPoint) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtlayerMountPointbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm.
+ currentKey = ffjtlayerMountPointnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffjKeylayerMountPointMountCount, kn) { + currentKey = ffjtlayerMountPointMountCount + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeylayerMountPointID, kn) { + currentKey = ffjtlayerMountPointID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffjKeylayerMountPointMountPoint, kn) { + currentKey = ffjtlayerMountPointMountPoint + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountCount, kn) { + currentKey = ffjtlayerMountPointMountCount + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountPoint, kn) { + currentKey = ffjtlayerMountPointMountPoint + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointID, kn) { + currentKey = ffjtlayerMountPointID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtlayerMountPointnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtlayerMountPointID: + goto handle_ID + + case ffjtlayerMountPointMountPoint: + goto handle_MountPoint + + case ffjtlayerMountPointMountCount: + goto handle_MountCount + + case ffjtlayerMountPointnosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MountPoint: + + /* handler: j.MountPoint type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.MountPoint = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MountCount: + + /* handler: j.MountCount type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.MountCount = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *layerStore) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *layerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtlayerStorebase = iota + ffjtlayerStorenosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *layerStore) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *layerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtlayerStorebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm.
+ currentKey = ffjtlayerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtlayerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtlayerStorenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *simpleGetCloser) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *simpleGetCloser) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtsimpleGetCloserbase = iota + ffjtsimpleGetClosernosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *simpleGetCloser) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *simpleGetCloser) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtsimpleGetCloserbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtsimpleGetClosernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtsimpleGetClosernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtsimpleGetClosernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index add87d3d..de605432 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -20,7 +20,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" - "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -87,6 +87,10 @@ type ROBigDataStore interface { // data associated with this ID, if it has previously been set. BigDataSize(id, key string) (int64, error) + // BigDataDigest retrieves the digest of a (potentially large) piece of + // data associated with this ID, if it has previously been set. + BigDataDigest(id, key string) (digest.Digest, error) + // BigDataNames() returns a list of the names of previously-stored pieces of // data. BigDataNames(id string) ([]string, error) @@ -327,6 +331,10 @@ type Store interface { // of named data associated with an image. ImageBigDataSize(id, key string) (int64, error) + // ImageBigDataDigest retrieves the digest of a (possibly large) chunk + // of named data associated with an image. + ImageBigDataDigest(id, key string) (digest.Digest, error) + // SetImageBigData stores a (possibly large) chunk of named data associated // with an image. SetImageBigData(id, key string, data []byte) error @@ -343,6 +351,10 @@ type Store interface { // chunk of named data associated with a container. ContainerBigDataSize(id, key string) (int64, error) + // ContainerBigDataDigest retrieves the digest of a (possibly large) + // chunk of named data associated with a container. + ContainerBigDataDigest(id, key string) (digest.Digest, error) + // SetContainerBigData stores a (possibly large) chunk of named data // associated with a container. SetContainerBigData(id, key string, data []byte) error @@ -358,6 +370,10 @@ type Store interface { // and may have different metadata, big data items, and flags. 
ImagesByTopLayer(id string) ([]*Image, error) + // ImagesByDigest returns a list of images which contain a big data item + // named ImageDigestBigDataKey whose contents have the specified digest. + ImagesByDigest(d digest.Digest) ([]*Image, error) + // Container returns a specific container. Container(id string) (*Container, error) @@ -418,6 +434,8 @@ type ImageOptions struct { // CreationDate, if not zero, will override the default behavior of marking the image as having been // created when CreateImage() was called, recording CreationDate instead. CreationDate time.Time + // Digest is a hard-coded digest value that we can use to look up the image. It is optional. + Digest digest.Digest } // ContainerOptions is used for passing options to a Store's CreateContainer() method. @@ -475,11 +493,6 @@ func GetStore(options StoreOptions) (Store, error) { if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) { return nil, err } - for _, subdir := range []string{} { - if err := os.MkdirAll(filepath.Join(options.RunRoot, subdir), 0700); err != nil && !os.IsExist(err) { - return nil, err - } - } if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) { return nil, err } @@ -728,11 +741,14 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w if err != nil { return nil, -1, err } + rlstores, err := s.ROLayerStores() + if err != nil { + return nil, -1, err + } rcstore, err := s.ContainerStore() if err != nil { return nil, -1, err } - rlstore.Lock() defer rlstore.Unlock() if modified, err := rlstore.Modified(); modified || err != nil { @@ -747,9 +763,15 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w id = stringid.GenerateRandomID() } if parent != "" { - if l, err := rlstore.Get(parent); err == nil && l != nil { - parent = l.ID - } else { + var ilayer *Layer + for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) { + if l, err := lstore.Get(parent); err == nil && l != nil { + ilayer = l + parent = ilayer.ID + break + } + } + if ilayer == nil { return nil, -1, ErrLayerUnknown } containers, err := rcstore.Containers() @@ -813,11 +835,11 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o } creationDate := time.Now().UTC() - if options != nil { + if options != nil && !options.CreationDate.IsZero() { creationDate = options.CreationDate } - return ristore.Create(id, names, layer, metadata, creationDate) + return ristore.Create(id, names, layer, metadata, creationDate, options.Digest) } func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { @@ -1026,6 +1048,30 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) { return -1, ErrSizeUnknown } +func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { + ristore, err := s.ImageStore() + if err != nil { + return "", err + } + stores, err := s.ROImageStores() + if err != nil { + return "", err + } + stores = append([]ROImageStore{ristore}, stores...) 
+ for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + d, err := ristore.BigDataDigest(id, key) + if err == nil && d.Validate() == nil { + return d, nil + } + } + return "", ErrDigestUnknown +} + func (s *store) ImageBigData(id, key string) ([]byte, error) { istore, err := s.ImageStore() if err != nil { @@ -1089,10 +1135,22 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) { if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } - return rcstore.BigDataSize(id, key) } +func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return "", err + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + return rcstore.BigDataDigest(id, key) +} + func (s *store) ContainerBigData(id, key string) ([]byte, error) { rcstore, err := s.ContainerStore() if err != nil { @@ -1103,7 +1161,6 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) { if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } - return rcstore.BigData(id, key) } @@ -1117,7 +1174,6 @@ func (s *store) SetContainerBigData(id, key string, data []byte) error { if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } - return rcstore.SetBigData(id, key, data) } @@ -1833,18 +1889,30 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye } storeLayers, err := m(store, d) if err != nil { - return nil, err + if errors.Cause(err) != ErrLayerUnknown { + return nil, err + } + continue } layers = append(layers, storeLayers...) } + if len(layers) == 0 { + return nil, ErrLayerUnknown + } return layers, nil } func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d) + } return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) } func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d) + } return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) } @@ -2019,6 +2087,33 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { return images, nil } +func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { + images := []*Image{} + + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + imageList, err := store.ByDigest(d) + if err != nil && err != ErrImageUnknown { + return nil, err + } + images = append(images, imageList...) 
+ } + return images, nil +} + func (s *store) Container(id string) (*Container, error) { rcstore, err := s.ContainerStore() if err != nil { @@ -2238,7 +2333,7 @@ func makeBigDataBaseName(key string) string { } func stringSliceWithoutValue(slice []string, value string) []string { - modified := []string{} + modified := make([]string, 0, len(slice)) for _, v := range slice { if v == value { continue diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf index 27eeee2c..a30f8feb 100644 --- a/vendor/github.com/containers/storage/vendor.conf +++ b/vendor/github.com/containers/storage/vendor.conf @@ -3,7 +3,6 @@ github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165 github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf -github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 @@ -16,6 +15,7 @@ github.com/pmezard/go-difflib v1.0.0 github.com/sirupsen/logrus v1.0.0 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 github.com/tchap/go-patricia v2.2.6 -github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721 +github.com/vbatts/tar-split v0.10.2 golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5 +github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac diff --git a/vendor/github.com/docker/docker/hack/README.md b/vendor/github.com/docker/docker/hack/README.md deleted file mode 100644 index 802395d5..00000000 --- a/vendor/github.com/docker/docker/hack/README.md +++ /dev/null @@ -1,60 +0,0 @@ -## About - -This directory contains a collection of scripts used to build and manage this -repository. If there are any issues regarding the intention of a particular -script (or even part of a certain script), please reach out to us. -It may help us either refine our current scripts, or add on new ones -that are appropriate for a given use case. - -## DinD (dind.sh) - -DinD is a wrapper script which allows Docker to be run inside a Docker -container. DinD requires the container to -be run with privileged mode enabled. - -## Generate Authors (generate-authors.sh) - -Generates AUTHORS; a file with all the names and corresponding emails of -individual contributors. AUTHORS can be found in the home directory of -this repository. - -## Make - -There are two make files, each with different extensions. Neither are supposed -to be called directly; only invoke `make`. Both scripts run inside a Docker -container. - -### make.ps1 - -- The Windows native build script that uses PowerShell semantics; it is limited -unlike `hack\make.sh` since it does not provide support for the full set of -operations provided by the Linux counterpart, `make.sh`. However, `make.ps1` -does provide support for local Windows development and Windows to Windows CI. -More information is found within `make.ps1` by the author, @jhowardmsft - -### make.sh - -- Referenced via `make test` when running tests on a local machine, -or directly referenced when running tests inside a Docker development container. 
-- When running on a local machine, `make test` to run all tests found in -`test`, `test-unit`, `test-integration-cli`, and `test-docker-py` on -your local machine. The default timeout is set in `make.sh` to 60 minutes -(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run -all of the tests. -- When running inside a Docker development container, `hack/make.sh` does -not have a single target that runs all the tests. You need to provide a -single command line with multiple targets that performs the same thing. -An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py` -- For more information related to testing outside the scope of this README, -refer to -[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/) - -## Release (release.sh) - -Releases any bundles built by `make` on a public AWS S3 bucket. -For information regarding configuration, please view `release.sh`. - -## Vendor (vendor.sh) - -A shell script that is a wrapper around Vndr. For information on how to use -this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md) diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md deleted file mode 100644 index 1cea5252..00000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Integration Testing on Swarm - -IT on Swarm allows you to execute integration test in parallel across a Docker Swarm cluster - -## Architecture - -### Master service - - - Works as a funker caller - - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`) - -### Worker service - - - Works as a funker callee - - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`) - -### Client - - - Controls master and workers via `docker stack` - - No need to have a local daemon - -Typically, the master and workers are supposed to be running on a cloud environment, -while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows. - -## Requirement - - - Docker daemon 1.13 or later - - Private registry for distributed execution with multiple nodes - -## Usage - -### Step 1: Prepare images - - $ make build-integration-cli-on-swarm - -Following environment variables are known to work in this step: - - - `BUILDFLAGS` - - `DOCKER_INCREMENTAL_BINARY` - -Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`. - -### Step 2: Execute tests - - $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest - -Following environment variables are known to work in this step: - - - `DOCKER_GRAPHDRIVER` - - `DOCKER_EXPERIMENTAL` - -#### Flags - -Basic flags: - - - `-replicas N`: the number of worker service replicas. i.e. degree of parallelism. - - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`. 
- - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`. - -Experimental flags for mitigating makespan nonuniformity: - - - `-shuffle`: Shuffle the test filter strings - -Flags for debugging IT on Swarm itself: - - - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default(0), the timestamp is used. - - `-filters-file FILE`: the file contains `-check.f` strings. By default, the file is automatically generated. - - `-dry-run`: skip the actual workload - - `keep-executor`: do not auto-remove executor containers, which is used for running privileged programs on Swarm diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf deleted file mode 100644 index efd6d6d0..00000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf +++ /dev/null @@ -1,2 +0,0 @@ -# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here -github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773 diff --git a/cmd/kpod/formats/templates.go b/vendor/github.com/docker/docker/pkg/templates/templates.go similarity index 96% rename from cmd/kpod/formats/templates.go rename to vendor/github.com/docker/docker/pkg/templates/templates.go index c2582552..d2d7e0c3 100644 --- a/cmd/kpod/formats/templates.go +++ b/vendor/github.com/docker/docker/pkg/templates/templates.go @@ -1,4 +1,4 @@ -package formats +package templates import ( "bytes" @@ -14,7 +14,7 @@ var basicFunctions = template.FuncMap{ buf := &bytes.Buffer{} enc := json.NewEncoder(buf) enc.SetEscapeHTML(false) - _ = enc.Encode(v) + enc.Encode(v) // Remove the trailing new line added by the encoder return strings.TrimSpace(buf.String()) }, @@ -31,7 +31,7 @@ var basicFunctions = template.FuncMap{ // This is a replacement of basicFunctions for header generation // because we want the header to remain intact. // Some functions like `split` are irrelevant so not added. -var headerFunctions = template.FuncMap{ +var HeaderFunctions = template.FuncMap{ "json": func(v string) string { return v }, diff --git a/vendor/github.com/emicklei/go-restful-swagger12/LICENSE b/vendor/github.com/emicklei/go-restful-swagger12/LICENSE deleted file mode 100644 index aeab5b44..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2017 Ernest Micklei - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful-swagger12/README.md b/vendor/github.com/emicklei/go-restful-swagger12/README.md deleted file mode 100644 index 037b9b09..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# go-restful-swagger12 - -[![Build Status](https://travis-ci.org/emicklei/go-restful-swagger12.png)](https://travis-ci.org/emicklei/go-restful-swagger12) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful-swagger12?status.svg)](https://godoc.org/github.com/emicklei/go-restful-swagger12) - -How to use Swagger UI with go-restful -= - -Get the Swagger UI sources (version 1.2 only) - - git clone https://github.com/wordnik/swagger-ui.git - -The project contains a "dist" folder. -Its contents has all the Swagger UI files you need. - -The `index.html` has an `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`. -You need to change that to match your WebService JSON endpoint e.g. `http://localhost:8080/apidocs.json` - -Now, you can install the Swagger WebService for serving the Swagger specification in JSON. - - config := swagger.Config{ - WebServices: restful.RegisteredWebServices(), - ApiPath: "/apidocs.json", - SwaggerPath: "/apidocs/", - SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"} - swagger.InstallSwaggerService(config) - - -Documenting Structs --- - -Currently there are 2 ways to document your structs in the go-restful Swagger. - -###### By using struct tags -- Use tag "description" to annotate a struct field with a description to show in the UI -- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added in an field of the struct and in case that there are multiple definition, they will be appended with an empty line. - -###### By using the SwaggerDoc method -Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**. - - type Address struct { - Country string `json:"country,omitempty"` - PostCode int `json:"postcode,omitempty"` - } - - func (Address) SwaggerDoc() map[string]string { - return map[string]string{ - "": "Address doc", - "country": "Country doc", - "postcode": "PostCode doc", - } - } - -This example will generate a JSON like this - - { - "Address": { - "id": "Address", - "description": "Address doc", - "properties": { - "country": { - "type": "string", - "description": "Country doc" - }, - "postcode": { - "type": "integer", - "format": "int32", - "description": "PostCode doc" - } - } - } - } - -**Very Important Notes:** -- `SwaggerDoc()` is using a **NON-Pointer** receiver (e.g. func (Address) and not func (*Address)) -- The returned map should use as key the name of the field as defined in the JSON parameter (e.g. `"postcode"` and not `"PostCode"`) - -Notes --- -- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..) -- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints. - -© 2017, ernestmicklei.com. MIT License. Contributions welcome. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go b/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go deleted file mode 100644 index 9f4c3690..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go +++ /dev/null @@ -1,64 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "encoding/json" -) - -// ApiDeclarationList maintains an ordered list of ApiDeclaration. -type ApiDeclarationList struct { - List []ApiDeclaration -} - -// At returns the ApiDeclaration by its path unless absent, then ok is false -func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) { - for _, each := range l.List { - if each.ResourcePath == path { - return each, true - } - } - return a, false -} - -// Put adds or replaces a ApiDeclaration with this name -func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) { - // maybe replace existing - for i, each := range l.List { - if each.ResourcePath == path { - // replace - l.List[i] = a - return - } - } - // add - l.List = append(l.List, a) -} - -// Do enumerates all the properties, each with its assigned name -func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) { - for _, each := range l.List { - block(each.ResourcePath, each) - } -} - -// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty -func (l ApiDeclarationList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.ResourcePath) - buf.WriteString("\": ") - encoder.Encode(each) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/config.go b/vendor/github.com/emicklei/go-restful-swagger12/config.go deleted file mode 100644 index 18f8e57d..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/config.go +++ /dev/null @@ -1,46 +0,0 @@ -package swagger - -import ( - "net/http" - "reflect" - - "github.com/emicklei/go-restful" -) - -// PostBuildDeclarationMapFunc can be used to modify the api declaration map. -type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList) - -// MapSchemaFormatFunc can be used to modify typeName at definition time. -type MapSchemaFormatFunc func(typeName string) string - -// MapModelTypeNameFunc can be used to return the desired typeName for a given -// type. It will return false if the default name should be used. -type MapModelTypeNameFunc func(t reflect.Type) (string, bool) - -type Config struct { - // url where the services are available, e.g. http://localhost:8080 - // if left empty then the basePath of Swagger is taken from the actual request - WebServicesUrl string - // path where the JSON api is avaiable , e.g. /apidocs - ApiPath string - // [optional] path where the swagger UI will be served, e.g. /swagger - SwaggerPath string - // [optional] location of folder containing Swagger HTML5 application index.html - SwaggerFilePath string - // api listing is constructed from this list of restful WebServices.
- WebServices []*restful.WebService - // will serve all static content (scripts,pages,images) - StaticHandler http.Handler - // [optional] on default CORS (Cross-Origin-Resource-Sharing) is enabled. - DisableCORS bool - // Top-level API version. Is reflected in the resource listing. - ApiVersion string - // If set then call this handler after building the complete ApiDeclaration Map - PostBuildHandler PostBuildDeclarationMapFunc - // Swagger global info struct - Info Info - // [optional] If set, model builder should call this handler to get addition typename-to-swagger-format-field conversion. - SchemaFormatHandler MapSchemaFormatFunc - // [optional] If set, model builder should call this handler to retrieve the name for a given type. - ModelTypeNameHandler MapModelTypeNameFunc -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go b/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go deleted file mode 100644 index d40786f2..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go +++ /dev/null @@ -1,467 +0,0 @@ -package swagger - -import ( - "encoding/json" - "reflect" - "strings" -) - -// ModelBuildable is used for extending Structs that need more control over -// how the Model appears in the Swagger api declaration. -type ModelBuildable interface { - PostBuildModel(m *Model) *Model -} - -type modelBuilder struct { - Models *ModelList - Config *Config -} - -type documentable interface { - SwaggerDoc() map[string]string -} - -// Check if this structure has a method with signature func () SwaggerDoc() map[string]string -// If it exists, retrive the documentation and overwrite all struct tag descriptions -func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string { - if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok { - return docable.SwaggerDoc() - } - return make(map[string]string) -} - -// addModelFrom creates and adds a Model to the builder and detects and calls -// the post build hook for customizations -func (b modelBuilder) addModelFrom(sample interface{}) { - if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil { - // allow customizations - if buildable, ok := sample.(ModelBuildable); ok { - modelOrNil = buildable.PostBuildModel(modelOrNil) - b.Models.Put(modelOrNil.Id, *modelOrNil) - } - } -} - -func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model { - // Turn pointers into simpler types so further checks are - // correct. - if st.Kind() == reflect.Ptr { - st = st.Elem() - } - - modelName := b.keyFrom(st) - if nameOverride != "" { - modelName = nameOverride - } - // no models needed for primitive types - if b.isPrimitiveType(modelName) { - return nil - } - // golang encoding/json packages says array and slice values encode as - // JSON arrays, except that []byte encodes as a base64-encoded string. - // If we see a []byte here, treat it at as a primitive type (string) - // and deal with it in buildArrayTypeProperty. 
- if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) && - st.Elem().Kind() == reflect.Uint8 { - return nil - } - // see if we already have visited this model - if _, ok := b.Models.At(modelName); ok { - return nil - } - sm := Model{ - Id: modelName, - Required: []string{}, - Properties: ModelPropertyList{}} - - // reference the model before further initializing (enables recursive structs) - b.Models.Put(modelName, sm) - - // check for slice or array - if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { - b.addModel(st.Elem(), "") - return &sm - } - // check for structure or primitive type - if st.Kind() != reflect.Struct { - return &sm - } - - fullDoc := getDocFromMethodSwaggerDoc2(st) - modelDescriptions := []string{} - - for i := 0; i < st.NumField(); i++ { - field := st.Field(i) - jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName) - if len(modelDescription) > 0 { - modelDescriptions = append(modelDescriptions, modelDescription) - } - - // add if not omitted - if len(jsonName) != 0 { - // update description - if fieldDoc, ok := fullDoc[jsonName]; ok { - prop.Description = fieldDoc - } - // update Required - if b.isPropertyRequired(field) { - sm.Required = append(sm.Required, jsonName) - } - sm.Properties.Put(jsonName, prop) - } - } - - // We always overwrite documentation if SwaggerDoc method exists - // "" is special for documenting the struct itself - if modelDoc, ok := fullDoc[""]; ok { - sm.Description = modelDoc - } else if len(modelDescriptions) != 0 { - sm.Description = strings.Join(modelDescriptions, "\n") - } - - // update model builder with completed model - b.Models.Put(modelName, sm) - - return &sm -} - -func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool { - required := true - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if len(s) > 1 && s[1] == "omitempty" { - return false - } - } - return required -} - -func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) { - jsonName = b.jsonNameOfField(field) - if len(jsonName) == 0 { - // empty name signals skip property - return "", "", prop - } - - if field.Name == "XMLName" && field.Type.String() == "xml.Name" { - // property is metadata for the xml.Name attribute, can be skipped - return "", "", prop - } - - if tag := field.Tag.Get("modelDescription"); tag != "" { - modelDescription = tag - } - - prop.setPropertyMetadata(field) - if prop.Type != nil { - return jsonName, modelDescription, prop - } - fieldType := field.Type - - // check if type is doing its own marshalling - marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem() - if fieldType.Implements(marshalerType) { - var pType = "string" - if prop.Type == nil { - prop.Type = &pType - } - if prop.Format == "" { - prop.Format = b.jsonSchemaFormat(b.keyFrom(fieldType)) - } - return jsonName, modelDescription, prop - } - - // check if annotation says it is a string - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if len(s) > 1 && s[1] == "string" { - stringt := "string" - prop.Type = &stringt - return jsonName, modelDescription, prop - } - } - - fieldKind := fieldType.Kind() - switch { - case fieldKind == reflect.Struct: - jsonName, prop := b.buildStructTypeProperty(field, jsonName, model) - return jsonName, modelDescription, prop - case fieldKind == reflect.Slice || fieldKind == reflect.Array: - jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName) - return jsonName, modelDescription, prop - case fieldKind == reflect.Ptr: - jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName) - return jsonName, modelDescription, prop - case fieldKind == reflect.String: - stringt := "string" - prop.Type = &stringt - return jsonName, modelDescription, prop - case fieldKind == reflect.Map: - // if it's a map, it's unstructured, and swagger 1.2 can't handle it - objectType := "object" - prop.Type = &objectType - return jsonName, modelDescription, prop - } - - fieldTypeName := b.keyFrom(fieldType) - if b.isPrimitiveType(fieldTypeName) { - mapped := b.jsonSchemaType(fieldTypeName) - prop.Type = &mapped - prop.Format = b.jsonSchemaFormat(fieldTypeName) - return jsonName, modelDescription, prop - } - modelType := b.keyFrom(fieldType) - prop.Ref = &modelType - - if fieldType.Name() == "" { // override type of anonymous structs - nestedTypeName := modelName + "." + jsonName - prop.Ref = &nestedTypeName - b.addModel(fieldType, nestedTypeName) - } - return jsonName, modelDescription, prop -} - -func hasNamedJSONTag(field reflect.StructField) bool { - parts := strings.Split(field.Tag.Get("json"), ",") - if len(parts) == 0 { - return false - } - for _, s := range parts[1:] { - if s == "inline" { - return false - } - } - return len(parts[0]) > 0 -} - -func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) { - prop.setPropertyMetadata(field) - // Check for type override in tag - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - // check for anonymous - if len(fieldType.Name()) == 0 { - // anonymous - anonType := model.Id + "." + jsonName - b.addModel(fieldType, anonType) - prop.Ref = &anonType - return jsonName, prop - } - - if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) { - // embedded struct - sub := modelBuilder{new(ModelList), b.Config} - sub.addModel(fieldType, "") - subKey := sub.keyFrom(fieldType) - // merge properties from sub - subModel, _ := sub.Models.At(subKey) - subModel.Properties.Do(func(k string, v ModelProperty) { - model.Properties.Put(k, v) - // if subModel says this property is required then include it - required := false - for _, each := range subModel.Required { - if k == each { - required = true - break - } - } - if required { - model.Required = append(model.Required, k) - } - }) - // add all new referenced models - sub.Models.Do(func(key string, sub Model) { - if key != subKey { - if _, ok := b.Models.At(key); !ok { - b.Models.Put(key, sub) - } - } - }) - // empty name signals skip property - return "", prop - } - // simple struct - b.addModel(fieldType, "") - var pType = b.keyFrom(fieldType) - prop.Ref = &pType - return jsonName, prop -} - -func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) { - // check for type override in tags - prop.setPropertyMetadata(field) - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - if fieldType.Elem().Kind() == reflect.Uint8 { - stringt := "string" - prop.Type = &stringt - return jsonName, prop - } - var pType = "array" - prop.Type = &pType - isPrimitive := b.isPrimitiveType(fieldType.Elem().Name()) - elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem()) - prop.Items = new(Item) - if isPrimitive { - mapped := b.jsonSchemaType(elemTypeName) - prop.Items.Type = &mapped - } else { - prop.Items.Ref = &elemTypeName - } - // add|overwrite model for element type - if fieldType.Elem().Kind() == reflect.Ptr { - fieldType = fieldType.Elem() - } - if !isPrimitive { - b.addModel(fieldType.Elem(), elemTypeName) - } - return jsonName, prop -} - -func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) { - prop.setPropertyMetadata(field) - // Check for type override in tags - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - - // override type of pointer to list-likes - if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array { - var pType = "array" - prop.Type = &pType - isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name()) - elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem()) - if isPrimitive { - primName := b.jsonSchemaType(elemName) - prop.Items = &Item{Ref: &primName} - } else { - prop.Items = &Item{Ref: &elemName} - } - if !isPrimitive { - // add|overwrite model for element type - b.addModel(fieldType.Elem().Elem(), elemName) - } - } else { - // non-array, pointer type - fieldTypeName := b.keyFrom(fieldType.Elem()) - var pType = b.jsonSchemaType(fieldTypeName) // no star, include pkg path - if b.isPrimitiveType(fieldTypeName) { - prop.Type = &pType - prop.Format = b.jsonSchemaFormat(fieldTypeName) - return jsonName, prop - } - prop.Ref = &pType - elemName := "" - if fieldType.Elem().Name() == "" { - elemName = modelName + "." + jsonName - prop.Ref = &elemName - } - b.addModel(fieldType.Elem(), elemName) - } - return jsonName, prop -} - -func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.Name() == "" { - return modelName + "." + jsonName - } - return b.keyFrom(t) -} - -func (b modelBuilder) keyFrom(st reflect.Type) string { - key := st.String() - if b.Config != nil && b.Config.ModelTypeNameHandler != nil { - if name, ok := b.Config.ModelTypeNameHandler(st); ok { - key = name - } - } - if len(st.Name()) == 0 { // unnamed type - // Swagger UI has special meaning for [ - key = strings.Replace(key, "[]", "||", -1) - } - return key -} - -// see also https://golang.org/ref/spec#Numeric_types -func (b modelBuilder) isPrimitiveType(modelName string) bool { - if len(modelName) == 0 { - return false - } - return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName) -} - -// jsonNameOfField returns the name of the field as it should appear in JSON format -// An empty string indicates that this field is not part of the JSON representation -func (b modelBuilder) jsonNameOfField(field reflect.StructField) string { - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if s[0] == "-" { - // empty name signals skip property - return "" - } else if s[0] != "" { - return s[0] - } - } - return field.Name -} - -// see also http://json-schema.org/latest/json-schema-core.html#anchor8 -func (b modelBuilder) jsonSchemaType(modelName string) string { - schemaMap := map[string]string{ - "uint": "integer", - "uint8": "integer", - "uint16": "integer", - "uint32": "integer", - "uint64": "integer", - - "int": "integer", - "int8": "integer", - "int16": "integer", - "int32": "integer", - "int64": "integer", - - "byte": "integer", - "float64": "number", - "float32": "number", - "bool": "boolean", - "time.Time": "string", - } - mapped, ok := schemaMap[modelName] - if !ok { - return modelName // use as is (custom or struct) - } - return mapped -} - -func (b modelBuilder) jsonSchemaFormat(modelName string) string { - if b.Config != nil && b.Config.SchemaFormatHandler != nil { - if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" { - return mapped - } - } - schemaMap := map[string]string{ - "int": "int32", - "int32": "int32", - "int64": "int64", - "byte": "byte", - "uint": "integer", - "uint8": "byte", - "float64": "double", - "float32": "float", - "time.Time": "date-time", - "*time.Time": "date-time", - } - mapped, ok := schemaMap[modelName] - if !ok { - return "" // no format - } - return mapped -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_list.go b/vendor/github.com/emicklei/go-restful-swagger12/model_list.go deleted file mode 100644 index 9bb6cb67..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_list.go +++ /dev/null @@ -1,86 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file.
- -import ( - "bytes" - "encoding/json" -) - -// NamedModel associates a name with a Model (not using its Id) -type NamedModel struct { - Name string - Model Model -} - -// ModelList encapsulates a list of NamedModel (association) -type ModelList struct { - List []NamedModel -} - -// Put adds or replaces a Model by its name -func (l *ModelList) Put(name string, model Model) { - for i, each := range l.List { - if each.Name == name { - // replace - l.List[i] = NamedModel{name, model} - return - } - } - // add - l.List = append(l.List, NamedModel{name, model}) -} - -// At returns a Model by its name, ok is false if absent -func (l *ModelList) At(name string) (m Model, ok bool) { - for _, each := range l.List { - if each.Name == name { - return each.Model, true - } - } - return m, false -} - -// Do enumerates all the models, each with its assigned name -func (l *ModelList) Do(block func(name string, value Model)) { - for _, each := range l.List { - block(each.Name, each.Model) - } -} - -// MarshalJSON writes the ModelList as if it was a map[string]Model -func (l ModelList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.Name) - buf.WriteString("\": ") - encoder.Encode(each.Model) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -// UnmarshalJSON reads back a ModelList. This is an expensive operation. -func (l *ModelList) UnmarshalJSON(data []byte) error { - raw := map[string]interface{}{} - json.NewDecoder(bytes.NewReader(data)).Decode(&raw) - for k, v := range raw { - // produces JSON bytes for each value - data, err := json.Marshal(v) - if err != nil { - return err - } - var m Model - json.NewDecoder(bytes.NewReader(data)).Decode(&m) - l.Put(k, m) - } - return nil -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go deleted file mode 100644 index a433b6b7..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go +++ /dev/null @@ -1,81 +0,0 @@ -package swagger - -import ( - "reflect" - "strings" -) - -func (prop *ModelProperty) setDescription(field reflect.StructField) { - if tag := field.Tag.Get("description"); tag != "" { - prop.Description = tag - } -} - -func (prop *ModelProperty) setDefaultValue(field reflect.StructField) { - if tag := field.Tag.Get("default"); tag != "" { - prop.DefaultValue = Special(tag) - } -} - -func (prop *ModelProperty) setEnumValues(field reflect.StructField) { - // We use | to separate the enum values. This value is chosen - // since it's unlikely to be useful in actual enumeration values. - if tag := field.Tag.Get("enum"); tag != "" { - prop.Enum = strings.Split(tag, "|") - } -} - -func (prop *ModelProperty) setMaximum(field reflect.StructField) { - if tag := field.Tag.Get("maximum"); tag != "" { - prop.Maximum = tag - } -} - -func (prop *ModelProperty) setType(field reflect.StructField) { - if tag := field.Tag.Get("type"); tag != "" { - // Check if the first two characters of the type tag are - // intended to emulate slice/array behaviour.
- // - // If type is intended to be a slice/array then add the - // overridden type to the array item instead of the main property - if len(tag) > 2 && tag[0:2] == "[]" { - pType := "array" - prop.Type = &pType - prop.Items = new(Item) - - iType := tag[2:] - prop.Items.Type = &iType - return - } - - prop.Type = &tag - } -} - -func (prop *ModelProperty) setMinimum(field reflect.StructField) { - if tag := field.Tag.Get("minimum"); tag != "" { - prop.Minimum = tag - } -} - -func (prop *ModelProperty) setUniqueItems(field reflect.StructField) { - tag := field.Tag.Get("unique") - switch tag { - case "true": - v := true - prop.UniqueItems = &v - case "false": - v := false - prop.UniqueItems = &v - } -} - -func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) { - prop.setDescription(field) - prop.setEnumValues(field) - prop.setMinimum(field) - prop.setMaximum(field) - prop.setUniqueItems(field) - prop.setDefaultValue(field) - prop.setType(field) -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go deleted file mode 100644 index 3babb194..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go +++ /dev/null @@ -1,87 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "encoding/json" -) - -// NamedModelProperty associates a name to a ModelProperty -type NamedModelProperty struct { - Name string - Property ModelProperty -} - -// ModelPropertyList encapsulates a list of NamedModelProperty (association) -type ModelPropertyList struct { - List []NamedModelProperty -} - -// At returns the ModelProperty by its name; ok is false if absent -func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) { - for _, each := range l.List { - if each.Name == name { - return each.Property, true - } - } - return p, false -} - -// Put adds or replaces a ModelProperty with this name -func (l *ModelPropertyList) Put(name string, prop ModelProperty) { - // maybe replace existing - for i, each := range l.List { - if each.Name == name { - // replace - l.List[i] = NamedModelProperty{Name: name, Property: prop} - return - } - } - // add - l.List = append(l.List, NamedModelProperty{Name: name, Property: prop}) -} - -// Do enumerates all the properties, each with its assigned name -func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) { - for _, each := range l.List { - block(each.Name, each.Property) - } -} - -// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty -func (l ModelPropertyList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.Name) - buf.WriteString("\": ") - encoder.Encode(each.Property) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
-func (l *ModelPropertyList) UnmarshalJSON(data []byte) error { - raw := map[string]interface{}{} - json.NewDecoder(bytes.NewReader(data)).Decode(&raw) - for k, v := range raw { - // produces JSON bytes for each value - data, err := json.Marshal(v) - if err != nil { - return err - } - var m ModelProperty - json.NewDecoder(bytes.NewReader(data)).Decode(&m) - l.Put(k, m) - } - return nil -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go b/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go deleted file mode 100644 index b33ccfbe..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go +++ /dev/null @@ -1,36 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import "github.com/emicklei/go-restful" - -type orderedRouteMap struct { - elements map[string][]restful.Route - keys []string -} - -func newOrderedRouteMap() *orderedRouteMap { - return &orderedRouteMap{ - elements: map[string][]restful.Route{}, - keys: []string{}, - } -} - -func (o *orderedRouteMap) Add(key string, route restful.Route) { - routes, ok := o.elements[key] - if ok { - routes = append(routes, route) - o.elements[key] = routes - return - } - o.elements[key] = []restful.Route{route} - o.keys = append(o.keys, key) -} - -func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) { - for _, k := range o.keys { - block(k, o.elements[k]) - } -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger.go deleted file mode 100644 index 9c40833e..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/swagger.go +++ /dev/null @@ -1,185 +0,0 @@ -// Package swagger implements the structures of the Swagger -// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md -package swagger - -const swaggerVersion = "1.2" - -// 4.3.3 Data Type Fields -type DataTypeFields struct { - Type *string `json:"type,omitempty"` // if Ref not used - Ref *string `json:"$ref,omitempty"` // if Type not used - Format string `json:"format,omitempty"` - DefaultValue Special `json:"defaultValue,omitempty"` - Enum []string `json:"enum,omitempty"` - Minimum string `json:"minimum,omitempty"` - Maximum string `json:"maximum,omitempty"` - Items *Item `json:"items,omitempty"` - UniqueItems *bool `json:"uniqueItems,omitempty"` -} - -type Special string - -// 4.3.4 Items Object -type Item struct { - Type *string `json:"type,omitempty"` - Ref *string `json:"$ref,omitempty"` - Format string `json:"format,omitempty"` -} - -// 5.1 Resource Listing -type ResourceListing struct { - SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2 - Apis []Resource `json:"apis"` - ApiVersion string `json:"apiVersion"` - Info Info `json:"info"` - Authorizations []Authorization `json:"authorizations,omitempty"` -} - -// 5.1.2 Resource Object -type Resource struct { - Path string `json:"path"` // relative or absolute, must start with / - Description string `json:"description"` -} - -// 5.1.3 Info Object -type Info struct { - Title string `json:"title"` - Description string `json:"description"` - TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"` - Contact string `json:"contact,omitempty"` - License string `json:"license,omitempty"` - LicenseUrl string `json:"licenseUrl,omitempty"` -} - -// 5.1.5 -type Authorization struct { - Type string `json:"type"` - PassAs 
string `json:"passAs"` - Keyname string `json:"keyname"` - Scopes []Scope `json:"scopes"` - GrantTypes []GrantType `json:"grandTypes"` -} - -// 5.1.6, 5.2.11 -type Scope struct { - // Required. The name of the scope. - Scope string `json:"scope"` - // Recommended. A short description of the scope. - Description string `json:"description"` -} - -// 5.1.7 -type GrantType struct { - Implicit Implicit `json:"implicit"` - AuthorizationCode AuthorizationCode `json:"authorization_code"` -} - -// 5.1.8 Implicit Object -type Implicit struct { - // Required. The login endpoint definition. - loginEndpoint LoginEndpoint `json:"loginEndpoint"` - // An optional alternative name to standard "access_token" OAuth2 parameter. - TokenName string `json:"tokenName"` -} - -// 5.1.9 Authorization Code Object -type AuthorizationCode struct { - TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"` - TokenEndpoint TokenEndpoint `json:"tokenEndpoint"` -} - -// 5.1.10 Login Endpoint Object -type LoginEndpoint struct { - // Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` -} - -// 5.1.11 Token Request Endpoint Object -type TokenRequestEndpoint struct { - // Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` - // An optional alternative name to standard "client_id" OAuth2 parameter. - ClientIdName string `json:"clientIdName"` - // An optional alternative name to the standard "client_secret" OAuth2 parameter. - ClientSecretName string `json:"clientSecretName"` -} - -// 5.1.12 Token Endpoint Object -type TokenEndpoint struct { - // Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` - // An optional alternative name to standard "access_token" OAuth2 parameter. 
- TokenName string `json:"tokenName"` -} - -// 5.2 API Declaration -type ApiDeclaration struct { - SwaggerVersion string `json:"swaggerVersion"` - ApiVersion string `json:"apiVersion"` - BasePath string `json:"basePath"` - ResourcePath string `json:"resourcePath"` // must start with / - Info Info `json:"info"` - Apis []Api `json:"apis,omitempty"` - Models ModelList `json:"models,omitempty"` - Produces []string `json:"produces,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Authorizations []Authorization `json:"authorizations,omitempty"` -} - -// 5.2.2 API Object -type Api struct { - Path string `json:"path"` // relative or absolute, must start with / - Description string `json:"description"` - Operations []Operation `json:"operations,omitempty"` -} - -// 5.2.3 Operation Object -type Operation struct { - DataTypeFields - Method string `json:"method"` - Summary string `json:"summary,omitempty"` - Notes string `json:"notes,omitempty"` - Nickname string `json:"nickname"` - Authorizations []Authorization `json:"authorizations,omitempty"` - Parameters []Parameter `json:"parameters"` - ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional - Produces []string `json:"produces,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Deprecated string `json:"deprecated,omitempty"` -} - -// 5.2.4 Parameter Object -type Parameter struct { - DataTypeFields - ParamType string `json:"paramType"` // path,query,body,header,form - Name string `json:"name"` - Description string `json:"description"` - Required bool `json:"required"` - AllowMultiple bool `json:"allowMultiple"` -} - -// 5.2.5 Response Message Object -type ResponseMessage struct { - Code int `json:"code"` - Message string `json:"message"` - ResponseModel string `json:"responseModel,omitempty"` -} - -// 5.2.6, 5.2.7 Models Object -type Model struct { - Id string `json:"id"` - Description string `json:"description,omitempty"` - Required []string `json:"required,omitempty"` - Properties ModelPropertyList `json:"properties"` - SubTypes []string `json:"subTypes,omitempty"` - Discriminator string `json:"discriminator,omitempty"` -} - -// 5.2.8 Properties Object -type ModelProperty struct { - DataTypeFields - Description string `json:"description,omitempty"` -} - -// 5.2.10 -type Authorizations map[string]Authorization diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go deleted file mode 100644 index 05a3c7e7..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go +++ /dev/null @@ -1,21 +0,0 @@ -package swagger - -type SwaggerBuilder struct { - SwaggerService -} - -func NewSwaggerBuilder(config Config) *SwaggerBuilder { - return &SwaggerBuilder{*newSwaggerService(config)} -} - -func (sb SwaggerBuilder) ProduceListing() ResourceListing { - return sb.SwaggerService.produceListing() -} - -func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration { - return sb.SwaggerService.produceAllDeclarations() -} - -func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) { - return sb.SwaggerService.produceDeclarations(route) -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go deleted file mode 100644 index d9062312..00000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go +++ /dev/null @@ -1,443 +0,0 @@ -package swagger - 
-import ( - "fmt" - - "github.com/emicklei/go-restful" - // "github.com/emicklei/hopwatch" - "net/http" - "reflect" - "sort" - "strings" - - "github.com/emicklei/go-restful/log" -) - -type SwaggerService struct { - config Config - apiDeclarationMap *ApiDeclarationList -} - -func newSwaggerService(config Config) *SwaggerService { - sws := &SwaggerService{ - config: config, - apiDeclarationMap: new(ApiDeclarationList)} - - // Build all ApiDeclarations - for _, each := range config.WebServices { - rootPath := each.RootPath() - // skip the api service itself - if rootPath != config.ApiPath { - if rootPath == "" || rootPath == "/" { - // use routes - for _, route := range each.Routes() { - entry := staticPathFromRoute(route) - _, exists := sws.apiDeclarationMap.At(entry) - if !exists { - sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry)) - } - } - } else { // use root path - sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath())) - } - } - } - - // if specified then call the PostBuildHandler - if config.PostBuildHandler != nil { - config.PostBuildHandler(sws.apiDeclarationMap) - } - return sws -} - -// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf -var LogInfo = func(format string, v ...interface{}) { - // use the restful package-wide logger - log.Printf(format, v...) -} - -// InstallSwaggerService adds the WebService that provides the API documentation of all services -// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki). -func InstallSwaggerService(aSwaggerConfig Config) { - RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer) -} - -// RegisterSwaggerService adds the WebService that provides the API documentation of all services -// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
-func RegisterSwaggerService(config Config, wsContainer *restful.Container) { - sws := newSwaggerService(config) - ws := new(restful.WebService) - ws.Path(config.ApiPath) - ws.Produces(restful.MIME_JSON) - if config.DisableCORS { - ws.Filter(enableCORS) - } - ws.Route(ws.GET("/").To(sws.getListing)) - ws.Route(ws.GET("/{a}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations)) - LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath) - wsContainer.Add(ws) - - // Check paths for UI serving - if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" { - swaggerPathSlash := config.SwaggerPath - // path must end with slash / - if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] { - LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)") - swaggerPathSlash += "/" - } - - LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath) - wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath)))) - - // if we define a custom static handler use it - } else if config.StaticHandler != nil && config.SwaggerPath != "" { - swaggerPathSlash := config.SwaggerPath - // path must end with slash / - if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] { - LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)") - swaggerPathSlash += "/" - - } - LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler) - wsContainer.Handle(swaggerPathSlash, config.StaticHandler) - - } else { - LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served") - } -} - -func staticPathFromRoute(r restful.Route) string { - static := r.Path - bracket := strings.Index(static, "{") - if bracket <= 1 { // result cannot be empty - return static - } - if bracket != -1 { - static = r.Path[:bracket] - } - if strings.HasSuffix(static, "/") { - return static[:len(static)-1] - } else { - return static - } -} - -func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" { - // prevent duplicate header - if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 { - resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin) - } - } - chain.ProcessFilter(req, resp) -} - -func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) { - listing := sws.produceListing() - resp.WriteAsJson(listing) -} - -func (sws SwaggerService) produceListing() ResourceListing { - listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info} - sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) { - ref := Resource{Path: k} - if len(v.Apis) > 0 { // use description of first (could still be empty) - ref.Description = v.Apis[0].Description - } - listing.Apis = append(listing.Apis, ref) - }) - return listing -} - -func (sws SwaggerService) getDeclarations(req *restful.Request,
resp *restful.Response) { - decl, ok := sws.produceDeclarations(composeRootPath(req)) - if !ok { - resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found") - return - } - // unless WebServicesUrl is given - if len(sws.config.WebServicesUrl) == 0 { - // update base path from the actual request - // TODO how to detect https? assume http for now - var host string - // X-Forwarded-Host or Host or Request.Host - hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific? - if !ok || len(hostvalues) == 0 { - forwarded, ok := req.Request.Header["Host"] // without reverse-proxy - if !ok || len(forwarded) == 0 { - // fallback to Host field - host = req.Request.Host - } else { - host = forwarded[0] - } - } else { - host = hostvalues[0] - } - // inspect Referer for the scheme (http vs https) - scheme := "http" - if referer := req.Request.Header["Referer"]; len(referer) > 0 { - if strings.HasPrefix(referer[0], "https") { - scheme = "https" - } - } - decl.BasePath = fmt.Sprintf("%s://%s", scheme, host) - } - resp.WriteAsJson(decl) -} - -func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration { - decls := map[string]ApiDeclaration{} - sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) { - decls[k] = v - }) - return decls -} - -func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) { - decl, ok := sws.apiDeclarationMap.At(route) - if !ok { - return nil, false - } - decl.BasePath = sws.config.WebServicesUrl - return &decl, true -} - -// composeDeclaration uses all routes and parameters to create an ApiDeclaration -func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration { - decl := ApiDeclaration{ - SwaggerVersion: swaggerVersion, - BasePath: sws.config.WebServicesUrl, - ResourcePath: pathPrefix, - Models: ModelList{}, - ApiVersion: ws.Version()} - - // collect any path parameters - rootParams := []Parameter{} - for _, param := range ws.PathParameters() { - rootParams = append(rootParams, asSwaggerParameter(param.Data())) - } - // aggregate by path - pathToRoutes := newOrderedRouteMap() - for _, other := range ws.Routes() { - if strings.HasPrefix(other.Path, pathPrefix) { - if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' { - continue - } - pathToRoutes.Add(other.Path, other) - } - } - pathToRoutes.Do(func(path string, routes []restful.Route) { - api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()} - voidString := "void" - for _, route := range routes { - operation := Operation{ - Method: route.Method, - Summary: route.Doc, - Notes: route.Notes, - // Type gets overwritten if there is a write sample - DataTypeFields: DataTypeFields{Type: &voidString}, - Parameters: []Parameter{}, - Nickname: route.Operation, - ResponseMessages: composeResponseMessages(route, &decl, &sws.config)} - - operation.Consumes = route.Consumes - operation.Produces = route.Produces - - // share root params if any - for _, swparam := range rootParams { - operation.Parameters = append(operation.Parameters, swparam) - } - // route specific params - for _, param := range route.ParameterDocs { - operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data())) - } - - sws.addModelsFromRouteTo(&operation, route, &decl) - api.Operations = append(api.Operations, operation) - } - decl.Apis = append(decl.Apis, api) - }) - return decl -} - -func withoutWildcard(path string) string { - if
strings.HasSuffix(path, ":*}") { - return path[0:len(path)-3] + "}" - } - return path -} - -// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them. -func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) { - if route.ResponseErrors == nil { - return messages - } - // sort by code - codes := sort.IntSlice{} - for code := range route.ResponseErrors { - codes = append(codes, code) - } - codes.Sort() - for _, code := range codes { - each := route.ResponseErrors[code] - message := ResponseMessage{ - Code: code, - Message: each.Message, - } - if each.Model != nil { - st := reflect.TypeOf(each.Model) - isCollection, st := detectCollectionType(st) - // a collection cannot be used as a response model - if !isCollection { - modelName := modelBuilder{}.keyFrom(st) - modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "") - message.ResponseModel = modelName - } - } - messages = append(messages, message) - } - return -} - -// addModelsFromRouteTo takes any read or write sample from the Route and creates a Swagger model from it. -func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) { - if route.ReadSample != nil { - sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models) - } - if route.WriteSample != nil { - sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models) - } -} - -func detectCollectionType(st reflect.Type) (bool, reflect.Type) { - isCollection := false - if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { - st = st.Elem() - isCollection = true - } else { - if st.Kind() == reflect.Ptr { - if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array { - st = st.Elem().Elem() - isCollection = true - } - } - } - return isCollection, st -} - -// addModelFromSampleTo creates and adds (or overwrites) a Model from a sample resource -func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) { - mb := modelBuilder{Models: models, Config: &sws.config} - if isResponse { - sampleType, items := asDataType(sample, &sws.config) - operation.Type = sampleType - operation.Items = items - } - mb.addModelFrom(sample) -} - -func asSwaggerParameter(param restful.ParameterData) Parameter { - return Parameter{ - DataTypeFields: DataTypeFields{ - Type: &param.DataType, - Format: asFormat(param.DataType, param.DataFormat), - DefaultValue: Special(param.DefaultValue), - }, - Name: param.Name, - Description: param.Description, - ParamType: asParamType(param.Kind), - - Required: param.Required} -} - -// Between 1 and 7 path parameters are supported -func composeRootPath(req *restful.Request) string { - path := "/" + req.PathParameter("a") - b := req.PathParameter("b") - if b == "" { - return path - } - path = path + "/" + b - c := req.PathParameter("c") - if c == "" { - return path - } - path = path + "/" + c - d := req.PathParameter("d") - if d == "" { - return path - } - path = path + "/" + d - e := req.PathParameter("e") - if e == "" { - return path - } - path = path + "/" + e - f := req.PathParameter("f") - if f == "" { - return path - } - path = path + "/" + f - g := req.PathParameter("g") - if g == "" { - return path - } - return path + "/" + g -} - -func asFormat(dataType string, dataFormat string) string { - if dataFormat != "" { - return dataFormat - } - return "" // TODO -} - -func asParamType(kind int) string { - switch { - case kind
== restful.PathParameterKind: - return "path" - case kind == restful.QueryParameterKind: - return "query" - case kind == restful.BodyParameterKind: - return "body" - case kind == restful.HeaderParameterKind: - return "header" - case kind == restful.FormParameterKind: - return "form" - } - return "" -} - -func asDataType(any interface{}, config *Config) (*string, *Item) { - // If it's not a collection, return the suggested model name - st := reflect.TypeOf(any) - isCollection, st := detectCollectionType(st) - modelName := modelBuilder{}.keyFrom(st) - // if it's not a collection we are done - if !isCollection { - return &modelName, nil - } - - // XXX: This is not very elegant - // We create an Item object referring to the given model - models := ModelList{} - mb := modelBuilder{Models: &models, Config: config} - mb.addModelFrom(any) - - elemTypeName := mb.getElementTypeName(modelName, "", st) - item := new(Item) - if mb.isPrimitiveType(elemTypeName) { - mapped := mb.jsonSchemaType(elemTypeName) - item.Type = &mapped - } else { - item.Ref = &elemTypeName - } - tmp := "array" - return &tmp, item -} diff --git a/vendor/github.com/exponent-io/jsonpath/LICENSE b/vendor/github.com/exponent-io/jsonpath/LICENSE deleted file mode 100644 index 54197725..00000000 --- a/vendor/github.com/exponent-io/jsonpath/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Exponent Labs LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/exponent-io/jsonpath/README.md b/vendor/github.com/exponent-io/jsonpath/README.md deleted file mode 100644 index 382fb313..00000000 --- a/vendor/github.com/exponent-io/jsonpath/README.md +++ /dev/null @@ -1,66 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath) -[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath) - -# jsonpath - -This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder in places where a json.Decoder would have been used. - -This Decoder has the following enhancements... - * The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
- * The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path. - * The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token. - * The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string. - -## Installation - - go get -u github.com/exponent-io/jsonpath - -## Example Usage - -#### SeekTo - -```go -import "github.com/exponent-io/jsonpath" - -var j = []byte(`[ - {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}}, - {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}} -]`) - -w := jsonpath.NewDecoder(bytes.NewReader(j)) -var v interface{} - -w.SeekTo(1, "Point", "G") -w.Decode(&v) // v is 218 -``` - -#### Scan with PathActions - -```go -var j = []byte(`{"colors":[ - {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}}, - {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}} -]}`) - -var actions PathActions - -// Extract the value at Point.A -actions.Add(func(d *Decoder) error { - var alpha int - err := d.Decode(&alpha) - fmt.Printf("Alpha: %v\n", alpha) - return err -}, "Point", "A") - -w := NewDecoder(bytes.NewReader(j)) -w.SeekTo("colors", 0) - -var ok = true -var err error -for ok { - ok, err = w.Scan(&actions) - if err != nil && err != io.EOF { - panic(err) - } -} -``` diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go deleted file mode 100644 index 31de46c7..00000000 --- a/vendor/github.com/exponent-io/jsonpath/decoder.go +++ /dev/null @@ -1,210 +0,0 @@ -package jsonpath - -import ( - "encoding/json" - "io" -) - -// KeyString is returned from Decoder.Token to represent each key in a JSON object value. -type KeyString string - -// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens. -type Decoder struct { - json.Decoder - - path JsonPath - context jsonContext -} - -// NewDecoder creates a new instance of the extended JSON Decoder. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{Decoder: *json.NewDecoder(r)} -} - -// SeekTo causes the Decoder to move forward to a given path in the JSON structure. -// -// The path argument must consist of strings or integers. Each string specifies a JSON object key, and -// each integer specifies an index into a JSON array. -// -// Consider the JSON structure -// -// { "a": [0,"s",12e4,{"b":0,"v":35} ] } -// -// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object, -// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v". -// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35. -// -// SeekTo returns a boolean value indicating whether a match was found. -// -// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
-func (d *Decoder) SeekTo(path ...interface{}) (bool, error) { - - if len(path) == 0 { - return len(d.path) == 0, nil - } - last := len(path) - 1 - if i, ok := path[last].(int); ok { - path[last] = i - 1 - } - - for { - if d.path.Equal(path) { - return true, nil - } - _, err := d.Token() - if err == io.EOF { - return false, nil - } else if err != nil { - return false, err - } - } -} - -// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is -// equivalent to encoding/json.Decode(). -func (d *Decoder) Decode(v interface{}) error { - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - return d.Decoder.Decode(v) -} - -// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the -// position of the most-recently parsed token. -func (d *Decoder) Path() JsonPath { - p := make(JsonPath, len(d.path)) - copy(p, d.path) - return p -} - -// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes -// between strings that are keys and strings that are values. String tokens that are object keys are returned as a -// KeyString rather than as a native string. -func (d *Decoder) Token() (json.Token, error) { - t, err := d.Decoder.Token() - if err != nil { - return t, err - } - - if t == nil { - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - return t, err - } - - switch t := t.(type) { - case json.Delim: - switch t { - case json.Delim('{'): - if d.context == arrValue { - d.path.incTop() - } - d.path.push("") - d.context = objKey - break - case json.Delim('}'): - d.path.pop() - d.context = d.path.inferContext() - break - case json.Delim('['): - if d.context == arrValue { - d.path.incTop() - } - d.path.push(-1) - d.context = arrValue - break - case json.Delim(']'): - d.path.pop() - d.context = d.path.inferContext() - break - } - case float64, json.Number, bool: - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - break - case string: - switch d.context { - case objKey: - d.path.nameTop(t) - d.context = objValue - return KeyString(t), err - case objValue: - d.context = objKey - case arrValue: - d.path.incTop() - } - break - } - - return t, err -} - -// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array) -// invoking each matching PathAction along the way. -// -// Scan returns true if there are more contiguous values to scan (for example in an array). -func (d *Decoder) Scan(ext *PathActions) (bool, error) { - - rootPath := d.Path() - - // If this is an array path, increment the root path in our local copy.
- if rootPath.inferContext() == arrValue { - rootPath.incTop() - } - - for { - // advance the token position - _, err := d.Token() - if err != nil { - return false, err - } - - match: - var relPath JsonPath - - // capture the new JSON path - path := d.Path() - - if len(path) > len(rootPath) { - // capture the path relative to where the scan started - relPath = path[len(rootPath):] - } else { - // if the path is not longer than the root, then we are done with this scan - // return boolean flag indicating if there are more items to scan at the same level - return d.Decoder.More(), nil - } - - // match the relative path against the path actions - if node := ext.node.match(relPath); node != nil { - if node.action != nil { - // we have a match so execute the action - err = node.action(d) - if err != nil { - return d.Decoder.More(), err - } - // The action may have advanced the decoder. If we are in an array, advancing it further would - // skip tokens. So, if we are scanning an array, jump to the top without advancing the token. - if d.path.inferContext() == arrValue && d.Decoder.More() { - goto match - } - } - } - } -} diff --git a/vendor/github.com/exponent-io/jsonpath/path.go b/vendor/github.com/exponent-io/jsonpath/path.go deleted file mode 100644 index d7db2ad3..00000000 --- a/vendor/github.com/exponent-io/jsonpath/path.go +++ /dev/null @@ -1,67 +0,0 @@ -// Package jsonpath extends the Go runtime's json.Decoder, enabling navigation of a stream of JSON tokens. -package jsonpath - -import "fmt" - -type jsonContext int - -const ( - none jsonContext = iota - objKey - objValue - arrValue -) - -// AnyIndex can be used in a pattern to match any array index. -const AnyIndex = -2 - -// JsonPath is a slice of strings and/or integers. Each string specifies a JSON object key, and -// each integer specifies an index into a JSON array. -type JsonPath []interface{} - -func (p *JsonPath) push(n interface{}) { *p = append(*p, n) } -func (p *JsonPath) pop() { *p = (*p)[:len(*p)-1] } - -// increment the index at the top of the stack (must be an array index) -func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 } - -// name the key at the top of the stack (must be an object key) -func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n } - -// infer the context from the item at the top of the stack -func (p *JsonPath) inferContext() jsonContext { - if len(*p) == 0 { - return none - } - t := (*p)[len(*p)-1] - switch t.(type) { - case string: - return objKey - case int: - return arrValue - default: - panic(fmt.Sprintf("Invalid stack type %T", t)) - } -} - -// Equal tests for equality between two JsonPath types. -func (p *JsonPath) Equal(o JsonPath) bool { - if len(*p) != len(o) { - return false - } - for i, v := range *p { - if v != o[i] { - return false - } - } - return true -} - -func (p *JsonPath) HasPrefix(o JsonPath) bool { - for i, v := range o { - if v != (*p)[i] { - return false - } - } - return true -} diff --git a/vendor/github.com/exponent-io/jsonpath/pathaction.go b/vendor/github.com/exponent-io/jsonpath/pathaction.go deleted file mode 100644 index 497ed686..00000000 --- a/vendor/github.com/exponent-io/jsonpath/pathaction.go +++ /dev/null @@ -1,61 +0,0 @@ -package jsonpath - -// pathNode is used to construct a trie of paths to be matched -type pathNode struct { - matchOn interface{} // string, or integer - childNodes []pathNode - action DecodeAction -} - -// match climbs the trie to find a node that matches the given JSON path.
-func (n *pathNode) match(path JsonPath) *pathNode { - var node *pathNode = n - for _, ps := range path { - found := false - for i, n := range node.childNodes { - if n.matchOn == ps { - node = &node.childNodes[i] - found = true - break - } else if _, ok := ps.(int); ok && n.matchOn == AnyIndex { - node = &node.childNodes[i] - found = true - break - } - } - if !found { - return nil - } - } - return node -} - -// PathActions represents a collection of DecodeAction functions that should be called at certain path positions -// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams. -type PathActions struct { - node pathNode -} - -// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail. -type DecodeAction func(d *Decoder) error - -// Add specifies an action to call on the Decoder when the specified path is encountered. -func (je *PathActions) Add(action DecodeAction, path ...interface{}) { - - var node *pathNode = &je.node - for _, ps := range path { - found := false - for i, n := range node.childNodes { - if n.matchOn == ps { - node = &node.childNodes[i] - found = true - break - } - } - if !found { - node.childNodes = append(node.childNodes, pathNode{matchOn: ps}) - node = &node.childNodes[len(node.childNodes)-1] - } - } - node.action = action -} diff --git a/vendor/github.com/fatih/camelcase/LICENSE.md b/vendor/github.com/fatih/camelcase/LICENSE.md deleted file mode 100644 index aa4a536c..00000000 --- a/vendor/github.com/fatih/camelcase/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/camelcase/README.md b/vendor/github.com/fatih/camelcase/README.md deleted file mode 100644 index 105a6ae3..00000000 --- a/vendor/github.com/fatih/camelcase/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# CamelCase [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/camelcase) [![Build Status](http://img.shields.io/travis/fatih/camelcase.svg?style=flat-square)](https://travis-ci.org/fatih/camelcase) - -CamelCase is a Golang (Go) package to split the words of a camelcase type -string into a slice of words. It can be used to convert a camelcase word (lower -or upper case) into any type of word. - -## Splitting rules: - -1. If string is not valid UTF-8, return it without splitting as - single item array. -2. 
Assign all unicode characters into one of 4 sets: lower case - letters, upper case letters, numbers, and all other characters. -3. Iterate through characters of string, introducing splits - between adjacent characters that belong to different sets. -4. Iterate through array of split strings, and if a given string - is upper case: - * if subsequent string is lower case: - * move last character of upper case string to beginning of - lower case string - -## Install - -```bash -go get github.com/fatih/camelcase -``` - -## Usage and examples - -```go -splitted := camelcase.Split("GolangPackage") - -fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package" -``` - -Both lower camel case and upper camel case are supported. For more info please -check: [http://en.wikipedia.org/wiki/CamelCase](http://en.wikipedia.org/wiki/CamelCase) - -Below are some example cases: - -``` -"" => [] -"lowercase" => ["lowercase"] -"Class" => ["Class"] -"MyClass" => ["My", "Class"] -"MyC" => ["My", "C"] -"HTML" => ["HTML"] -"PDFLoader" => ["PDF", "Loader"] -"AString" => ["A", "String"] -"SimpleXMLParser" => ["Simple", "XML", "Parser"] -"vimRPCPlugin" => ["vim", "RPC", "Plugin"] -"GL11Version" => ["GL", "11", "Version"] -"99Bottles" => ["99", "Bottles"] -"May5" => ["May", "5"] -"BFG9000" => ["BFG", "9000"] -"BöseÜberraschung" => ["Böse", "Überraschung"] -"Two spaces" => ["Two", " ", "spaces"] -"BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] -``` diff --git a/vendor/github.com/fatih/camelcase/camelcase.go b/vendor/github.com/fatih/camelcase/camelcase.go deleted file mode 100644 index 02160c9a..00000000 --- a/vendor/github.com/fatih/camelcase/camelcase.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package camelcase is a micro package to split the words of a camelcase type -// string into a slice of words. -package camelcase - -import ( - "unicode" - "unicode/utf8" -) - -// Split splits the camelcase word and returns a list of words. It also -// supports digits. Both lower camel case and upper camel case are supported. -// For more info please check: http://en.wikipedia.org/wiki/CamelCase -// -// Examples -// -// "" => [""] -// "lowercase" => ["lowercase"] -// "Class" => ["Class"] -// "MyClass" => ["My", "Class"] -// "MyC" => ["My", "C"] -// "HTML" => ["HTML"] -// "PDFLoader" => ["PDF", "Loader"] -// "AString" => ["A", "String"] -// "SimpleXMLParser" => ["Simple", "XML", "Parser"] -// "vimRPCPlugin" => ["vim", "RPC", "Plugin"] -// "GL11Version" => ["GL", "11", "Version"] -// "99Bottles" => ["99", "Bottles"] -// "May5" => ["May", "5"] -// "BFG9000" => ["BFG", "9000"] -// "BöseÜberraschung" => ["Böse", "Überraschung"] -// "Two spaces" => ["Two", " ", "spaces"] -// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] -// -// Splitting rules -// -// 1) If string is not valid UTF-8, return it without splitting as -// single item array. -// 2) Assign all unicode characters into one of 4 sets: lower case -// letters, upper case letters, numbers, and all other characters. -// 3) Iterate through characters of string, introducing splits -// between adjacent characters that belong to different sets. 
-// 4) Iterate through array of split strings, and if a given string -// is upper case: -// if subsequent string is lower case: -// move last character of upper case string to beginning of -// lower case string -func Split(src string) (entries []string) { - // don't split invalid utf8 - if !utf8.ValidString(src) { - return []string{src} - } - entries = []string{} - var runes [][]rune - lastClass := 0 - class := 0 - // split into fields based on class of unicode character - for _, r := range src { - switch true { - case unicode.IsLower(r): - class = 1 - case unicode.IsUpper(r): - class = 2 - case unicode.IsDigit(r): - class = 3 - default: - class = 4 - } - if class == lastClass { - runes[len(runes)-1] = append(runes[len(runes)-1], r) - } else { - runes = append(runes, []rune{r}) - } - lastClass = class - } - // handle upper case -> lower case sequences, e.g. - // "PDFL", "oader" -> "PDF", "Loader" - for i := 0; i < len(runes)-1; i++ { - if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) { - runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...) - runes[i] = runes[i][:len(runes[i])-1] - } - } - // construct []string from results - for _, s := range runes { - if len(s) > 0 { - entries = append(entries, string(s)) - } - } - return -} diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md deleted file mode 100644 index d675c49d..00000000 --- a/vendor/github.com/go-openapi/analysis/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# OpenAPI initiative analysis [![Build Status](https://ci.vmware.run/api/badges/go-openapi/analysis/status.svg)](https://ci.vmware.run/go-openapi/analysis) [![Coverage](https://coverage.vmware.run/badges/go-openapi/analysis/coverage.svg)](https://coverage.vmware.run/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis) - - -A foundational library to analyze an OAI specification document for easier reasoning about the content. \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go deleted file mode 100644 index d388db3a..00000000 --- a/vendor/github.com/go-openapi/analysis/analyzer.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package analysis - -import ( - "fmt" - slashpath "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -type referenceAnalysis struct { - schemas map[string]spec.Ref - responses map[string]spec.Ref - parameters map[string]spec.Ref - items map[string]spec.Ref - allRefs map[string]spec.Ref - referenced struct { - schemas map[string]SchemaRef - responses map[string]*spec.Response - parameters map[string]*spec.Parameter - } -} - -func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { - r.allRefs["#"+key] = ref -} - -func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items) { - r.items["#"+key] = items.Ref - r.addRef(key, items.Ref) -} - -func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { - r.schemas["#"+key] = ref.Schema.Ref - r.addRef(key, ref.Schema.Ref) -} - -func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { - r.responses["#"+key] = resp.Ref - r.addRef(key, resp.Ref) -} - -func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { - r.parameters["#"+key] = param.Ref - r.addRef(key, param.Ref) -} - -// New takes a swagger spec object and returns an analyzed spec document. -// The analyzed document contains a number of indices that make it easier to -// reason about semantics of a swagger specification for use in code generation -// or validation etc. -func New(doc *spec.Swagger) *Spec { - a := &Spec{ - spec: doc, - consumes: make(map[string]struct{}, 150), - produces: make(map[string]struct{}, 150), - authSchemes: make(map[string]struct{}, 150), - operations: make(map[string]map[string]*spec.Operation, 150), - allSchemas: make(map[string]SchemaRef, 150), - allOfs: make(map[string]SchemaRef, 150), - references: referenceAnalysis{ - schemas: make(map[string]spec.Ref, 150), - responses: make(map[string]spec.Ref, 150), - parameters: make(map[string]spec.Ref, 150), - items: make(map[string]spec.Ref, 150), - allRefs: make(map[string]spec.Ref, 150), - }, - } - a.references.referenced.schemas = make(map[string]SchemaRef, 150) - a.references.referenced.responses = make(map[string]*spec.Response, 150) - a.references.referenced.parameters = make(map[string]*spec.Parameter, 150) - a.initialize() - return a -} - -// Spec takes a swagger spec object and turns it into a registry -// with a bunch of utility methods to act on the information in the spec -type Spec struct { - spec *spec.Swagger - consumes map[string]struct{} - produces map[string]struct{} - authSchemes map[string]struct{} - operations map[string]map[string]*spec.Operation - references referenceAnalysis - allSchemas map[string]SchemaRef - allOfs map[string]SchemaRef -} - -func (s *Spec) initialize() { - for _, c := range s.spec.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range s.spec.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range s.spec.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - for path, pathItem := range s.AllPaths() { - s.analyzeOperations(path, &pathItem) - } - - for name, parameter := range s.spec.Parameters { - refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) - if parameter.Items != nil { - s.analyzeItems("items", parameter.Items, refPref) - } - if parameter.In == "body" && parameter.Schema != nil { - s.analyzeSchema("schema", *parameter.Schema, refPref) - } - } - - for name, response := range s.spec.Responses { - refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) - for _, 
v := range response.Headers { - if v.Items != nil { - s.analyzeItems("items", v.Items, refPref) - } - } - if response.Schema != nil { - s.analyzeSchema("schema", *response.Schema, refPref) - } - } - - for name, schema := range s.spec.Definitions { - s.analyzeSchema(name, schema, "/definitions") - } - // TODO: after analyzing all things and flattening schemas etc - // resolve all the collected references to their final representations - // best put in a separate method because this could get expensive -} - -func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { - // TODO: resolve refs here? - op := pi - s.analyzeOperation("GET", path, op.Get) - s.analyzeOperation("PUT", path, op.Put) - s.analyzeOperation("POST", path, op.Post) - s.analyzeOperation("PATCH", path, op.Patch) - s.analyzeOperation("DELETE", path, op.Delete) - s.analyzeOperation("HEAD", path, op.Head) - s.analyzeOperation("OPTIONS", path, op.Options) - for i, param := range op.Parameters { - refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, &param) - } - if param.Items != nil { - s.analyzeItems("items", param.Items, refPref) - } - if param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } -} - -func (s *Spec) analyzeItems(name string, items *spec.Items, prefix string) { - if items == nil { - return - } - refPref := slashpath.Join(prefix, name) - s.analyzeItems(name, items.Items, refPref) - if items.Ref.String() != "" { - s.references.addItemsRef(refPref, items) - } -} - -func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { - if op == nil { - return - } - - for _, c := range op.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range op.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range op.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - if _, ok := s.operations[method]; !ok { - s.operations[method] = make(map[string]*spec.Operation) - } - s.operations[method][path] = op - prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) - for i, param := range op.Parameters { - refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, &param) - } - s.analyzeItems("items", param.Items, refPref) - if param.In == "body" && param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } - if op.Responses != nil { - if op.Responses.Default != nil { - refPref := slashpath.Join(prefix, "responses", "default") - if op.Responses.Default.Ref.String() != "" { - s.references.addResponseRef(refPref, op.Responses.Default) - } - for _, v := range op.Responses.Default.Headers { - s.analyzeItems("items", v.Items, refPref) - } - if op.Responses.Default.Schema != nil { - s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref) - } - } - for k, res := range op.Responses.StatusCodeResponses { - refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, &res) - } - for _, v := range res.Headers { - s.analyzeItems("items", v.Items, refPref) - } - if res.Schema != nil { - s.analyzeSchema("schema", *res.Schema, refPref) - } - } - } -} - -func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) { - refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) - schRef := SchemaRef{ - Name: name, - Schema: &schema, - Ref:
spec.MustCreateRef("#" + refURI), - } - s.allSchemas["#"+refURI] = schRef - if schema.Ref.String() != "" { - s.references.addSchemaRef(refURI, schRef) - } - for k, v := range schema.Definitions { - s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions")) - } - for k, v := range schema.Properties { - s.analyzeSchema(k, v, slashpath.Join(refURI, "properties")) - } - for k, v := range schema.PatternProperties { - s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties")) - } - for i, v := range schema.AllOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) - } - if len(schema.AllOf) > 0 { - s.allOfs["#"+refURI] = SchemaRef{Name: name, Schema: &schema, Ref: spec.MustCreateRef("#" + refURI)} - } - for i, v := range schema.AnyOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) - } - for i, v := range schema.OneOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) - } - if schema.Not != nil { - s.analyzeSchema("not", *schema.Not, refURI) - } - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI) - } - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI) - } - if schema.Items != nil { - if schema.Items.Schema != nil { - s.analyzeSchema("items", *schema.Items.Schema, refURI) - } - for i, sch := range schema.Items.Schemas { - s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) - } - } -} - -// SecurityRequirement is a representation of a security requirement for an operation -type SecurityRequirement struct { - Name string - Scopes []string -} - -// SecurityRequirementsFor gets the security requirements for the operation -func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement { - if s.spec.Security == nil && operation.Security == nil { - return nil - } - - schemes := s.spec.Security - if operation.Security != nil { - schemes = operation.Security - } - - unique := make(map[string]SecurityRequirement) - for _, scheme := range schemes { - for k, v := range scheme { - if _, ok := unique[k]; !ok { - unique[k] = SecurityRequirement{Name: k, Scopes: v} - } - } - } - - var result []SecurityRequirement - for _, v := range unique { - result = append(result, v) - } - return result -} - -// SecurityDefinitionsFor gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { - requirements := s.SecurityRequirementsFor(operation) - if len(requirements) == 0 { - return nil - } - result := make(map[string]spec.SecurityScheme) - for _, v := range requirements { - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - return result -} - -// ConsumesFor gets the mediatypes for the operation -func (s *Spec) ConsumesFor(operation *spec.Operation) []string { - - if len(operation.Consumes) == 0 { - cons := make(map[string]struct{}, len(s.spec.Consumes)) - for _, k := range s.spec.Consumes { - cons[k] = struct{}{} - } - return s.structMapKeys(cons) - } - - cons := make(map[string]struct{}, len(operation.Consumes)) - for _, c := range operation.Consumes { - cons[c] = struct{}{} - } - return s.structMapKeys(cons) -} - -// ProducesFor gets the mediatypes for the operation -func (s *Spec) 
ProducesFor(operation *spec.Operation) []string { - if len(operation.Produces) == 0 { - prod := make(map[string]struct{}, len(s.spec.Produces)) - for _, k := range s.spec.Produces { - prod[k] = struct{}{} - } - return s.structMapKeys(prod) - } - - prod := make(map[string]struct{}, len(operation.Produces)) - for _, c := range operation.Produces { - prod[c] = struct{}{} - } - return s.structMapKeys(prod) -} - -func mapKeyFromParam(param *spec.Parameter) string { - return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) -} - -func fieldNameFromParam(param *spec.Parameter) string { - if nm, ok := param.Extensions.GetString("go-name"); ok { - return nm - } - return swag.ToGoName(param.Name) -} - -func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) { - for _, param := range parameters { - pr := param - if pr.Ref.String() != "" { - obj, _, err := pr.Ref.GetPointer().Get(s.spec) - if err != nil { - panic(err) - } - pr = obj.(spec.Parameter) - } - res[mapKeyFromParam(&pr)] = pr - } -} - -// ParametersFor the specified operation id -func (s *Spec) ParametersFor(operationID string) []spec.Parameter { - gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { - bag := make(map[string]spec.Parameter) - s.paramsAsMap(pi.Parameters, bag) - s.paramsAsMap(op.Parameters, bag) - - var res []spec.Parameter - for _, v := range bag { - res = append(res, v) - } - return res - } - for _, pi := range s.spec.Paths.Paths { - if pi.Get != nil && pi.Get.ID == operationID { - return gatherParams(&pi, pi.Get) - } - if pi.Head != nil && pi.Head.ID == operationID { - return gatherParams(&pi, pi.Head) - } - if pi.Options != nil && pi.Options.ID == operationID { - return gatherParams(&pi, pi.Options) - } - if pi.Post != nil && pi.Post.ID == operationID { - return gatherParams(&pi, pi.Post) - } - if pi.Patch != nil && pi.Patch.ID == operationID { - return gatherParams(&pi, pi.Patch) - } - if pi.Put != nil && pi.Put.ID == operationID { - return gatherParams(&pi, pi.Put) - } - if pi.Delete != nil && pi.Delete.ID == operationID { - return gatherParams(&pi, pi.Delete) - } - } - return nil -} - -// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. 
-func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { - res := make(map[string]spec.Parameter) - if pi, ok := s.spec.Paths.Paths[path]; ok { - s.paramsAsMap(pi.Parameters, res) - s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res) - } - return res -} - -// OperationForName gets the operation for the given id -func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { - for method, pathItem := range s.operations { - for path, op := range pathItem { - if operationID == op.ID { - return method, path, op, true - } - } - } - return "", "", nil, false -} - -// OperationFor the given method and path -func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { - if mp, ok := s.operations[strings.ToUpper(method)]; ok { - op, fn := mp[path] - return op, fn - } - return nil, false -} - -// Operations gathers all the operations specified in the spec document -func (s *Spec) Operations() map[string]map[string]*spec.Operation { - return s.operations -} - -func (s *Spec) structMapKeys(mp map[string]struct{}) []string { - if len(mp) == 0 { - return nil - } - - result := make([]string, 0, len(mp)) - for k := range mp { - result = append(result, k) - } - return result -} - -// AllPaths returns all the paths in the swagger spec -func (s *Spec) AllPaths() map[string]spec.PathItem { - if s.spec == nil || s.spec.Paths == nil { - return nil - } - return s.spec.Paths.Paths -} - -// OperationIDs gets all the operation ids based on method and path -func (s *Spec) OperationIDs() []string { - if len(s.operations) == 0 { - return nil - } - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p, o := range v { - if o.ID != "" { - result = append(result, o.ID) - } else { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - } - return result -} - -// RequiredConsumes gets all the distinct consumes that are specified in the specification document -func (s *Spec) RequiredConsumes() []string { - return s.structMapKeys(s.consumes) -} - -// RequiredProduces gets all the distinct produces that are specified in the specification document -func (s *Spec) RequiredProduces() []string { - return s.structMapKeys(s.produces) -} - -// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec -func (s *Spec) RequiredSecuritySchemes() []string { - return s.structMapKeys(s.authSchemes) -} - -// SchemaRef is a reference to a schema -type SchemaRef struct { - Name string - Ref spec.Ref - Schema *spec.Schema -} - -// SchemasWithAllOf returns schema references to all schemas that are defined -// with an allOf key -func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { - for _, v := range s.allOfs { - result = append(result, v) - } - return -} - -// AllDefinitions returns schema references for all the definitions that were discovered -func (s *Spec) AllDefinitions() (result []SchemaRef) { - for _, v := range s.allSchemas { - result = append(result, v) - } - return -} - -// AllDefinitionReferences returns json refs for all the discovered schemas -func (s *Spec) AllDefinitionReferences() (result []string) { - for _, v := range s.references.schemas { - result = append(result, v.String()) - } - return -} - -// AllParameterReferences returns json refs for all the discovered parameters -func (s *Spec) AllParameterReferences() (result []string) { - for _, v := range s.references.parameters { - result = append(result, v.String()) - } -
return -} - -// AllResponseReferences returns json refs for all the discovered responses -func (s *Spec) AllResponseReferences() (result []string) { - for _, v := range s.references.responses { - result = append(result, v.String()) - } - return -} - -// AllItemsReferences returns the references for all the items -func (s *Spec) AllItemsReferences() (result []string) { - for _, v := range s.references.items { - result = append(result, v.String()) - } - return -} - -// AllReferences returns all the references found in the document -func (s *Spec) AllReferences() (result []string) { - for _, v := range s.references.allRefs { - result = append(result, v.String()) - } - return -} - -// AllRefs returns all the unique references found in the document -func (s *Spec) AllRefs() (result []spec.Ref) { - set := make(map[string]struct{}) - for _, v := range s.references.allRefs { - a := v.String() - if a == "" { - continue - } - if _, ok := set[a]; !ok { - set[a] = struct{}{} - result = append(result, v) - } - } - return -} diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md deleted file mode 100644 index 9d5c8999..00000000 --- a/vendor/github.com/go-openapi/loads/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Loads OAI specs [![Build Status](https://ci.vmware.run/api/badges/go-openapi/loads/status.svg)](https://ci.vmware.run/go-openapi/loads) [![Coverage](https://coverage.vmware.run/badges/go-openapi/loads/coverage.svg)](https://coverage.vmware.run/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) - -Loading of OAI specification documents from local or remote locations. diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go deleted file mode 100644 index ff1ee1c9..00000000 --- a/vendor/github.com/go-openapi/loads/spec.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package loads - -import ( - "encoding/json" - "fmt" - "net/url" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// JSONDoc loads a json document from either a file or a remote url -func JSONDoc(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// DocLoader represents a doc loader type -type DocLoader func(string) (json.RawMessage, error) - -// DocMatcher represents a predicate to check if a loader matches -type DocMatcher func(string) bool - -var loaders = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc} - -// AddLoader for a document -func AddLoader(predicate DocMatcher, load DocLoader) { - prev := loaders - loaders = &loader{ - Match: predicate, - Fn: load, - Next: prev, - } - -} - -type loader struct { - Fn DocLoader - Match DocMatcher - Next *loader -} - -// JSONSpec loads a spec from a json document -func JSONSpec(path string) (*Document, error) { - data, err := JSONDoc(path) - if err != nil { - return nil, err - } - // convert to json - return Analyzed(json.RawMessage(data), "") -} - -// Document represents a swagger spec document -type Document struct { - // specAnalyzer - Analyzer *analysis.Spec - spec *spec.Swagger - origSpec *spec.Swagger - schema *spec.Schema - raw json.RawMessage -} - -// Spec loads a new spec document -func Spec(path string) (*Document, error) { - specURL, err := url.Parse(path) - if err != nil { - return nil, err - } - for l := loaders.Next; l != nil; l = l.Next { - if loaders.Match(specURL.Path) { - b, err2 := loaders.Fn(path) - if err2 != nil { - return nil, err2 - } - return Analyzed(b, "") - } - } - b, err := loaders.Fn(path) - if err != nil { - return nil, err - } - return Analyzed(b, "") -} - -var swag20Schema = spec.MustLoadSwagger20Schema() - -// Analyzed creates a new analyzed spec document -func Analyzed(data json.RawMessage, version string) (*Document, error) { - if version == "" { - version = "2.0" - } - if version != "2.0" { - return nil, fmt.Errorf("spec version %q is not supported", version) - } - - swspec := new(spec.Swagger) - if err := json.Unmarshal(data, swspec); err != nil { - return nil, err - } - - origsqspec := new(spec.Swagger) - if err := json.Unmarshal(data, origsqspec); err != nil { - return nil, err - } - - d := &Document{ - Analyzer: analysis.New(swspec), - schema: swag20Schema, - spec: swspec, - raw: data, - origSpec: origsqspec, - } - return d, nil -} - -// Expanded expands the ref fields in the spec document and returns a new spec document -func (d *Document) Expanded() (*Document, error) { - swspec := new(spec.Swagger) - if err := json.Unmarshal(d.raw, swspec); err != nil { - return nil, err - } - if err := spec.ExpandSpec(swspec); err != nil { - return nil, err - } - - dd := &Document{ - Analyzer: analysis.New(swspec), - spec: swspec, - schema: swag20Schema, - raw: d.raw, - origSpec: d.origSpec, - } - return dd, nil -} - -// BasePath the base path for this spec -func (d *Document) BasePath() string { - return d.spec.BasePath -} - -// Version returns the version of this spec -func (d *Document) Version() string { - return d.spec.Swagger -} - -// Schema returns the swagger 2.0 schema -func (d *Document) Schema() *spec.Schema { - return d.schema -} - -// Spec returns the swagger spec object model -func (d *Document) Spec() *spec.Swagger { - return d.spec -} - -// Host returns the host for the API -func (d *Document) Host() string { - 
return d.spec.Host -} - -// Raw returns the raw swagger spec as json bytes -func (d *Document) Raw() json.RawMessage { - return d.raw -} - -func (d *Document) OrigSpec() *spec.Swagger { - return d.origSpec -} - -// ResetDefinitions gives a shallow copy with the models reset -func (d *Document) ResetDefinitions() *Document { - defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) - for k, v := range d.origSpec.Definitions { - defs[k] = v - } - - d.spec.Definitions = defs - return d -} - -// Pristine creates a new pristine document instance based on the input data -func (d *Document) Pristine() *Document { - dd, _ := Analyzed(d.Raw(), d.Version()) - return dd -} diff --git a/vendor/github.com/godbus/dbus/README.markdown b/vendor/github.com/godbus/dbus/README.markdown index 0a6e7e5b..d37f4e2e 100644 --- a/vendor/github.com/godbus/dbus/README.markdown +++ b/vendor/github.com/godbus/dbus/README.markdown @@ -1,3 +1,5 @@ +[![Build Status](https://travis-ci.org/godbus/dbus.svg?branch=master)](https://travis-ci.org/godbus/dbus) + dbus ---- @@ -29,6 +31,7 @@ gives a short overview over the basic usage. #### Projects using godbus - [notify](https://github.com/esiqveland/notify) provides desktop notifications over dbus into a library. +- [go-bluetooth](https://github.com/muka/go-bluetooth) provides a bluetooth client over bluez dbus API. Please note that the API is considered unstable for now and may change without further notice. diff --git a/vendor/github.com/godbus/dbus/conn.go b/vendor/github.com/godbus/dbus/conn.go index 9aa2e128..5720e2eb 100644 --- a/vendor/github.com/godbus/dbus/conn.go +++ b/vendor/github.com/godbus/dbus/conn.go @@ -9,8 +9,6 @@ import ( "sync" ) -const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket" - var ( systemBus *Conn systemBusLck sync.Mutex @@ -47,15 +45,13 @@ type Conn struct { calls map[uint32]*Call callsLck sync.RWMutex - handlers map[ObjectPath]map[string]exportedObj - handlersLck sync.RWMutex + handler Handler out chan *Message closed bool outLck sync.RWMutex - signals []chan<- *Signal - signalsLck sync.Mutex + signalHandler SignalHandler eavesdropped chan<- *Message eavesdroppedLck sync.Mutex @@ -90,16 +86,33 @@ func SessionBus() (conn *Conn, err error) { return } -// SessionBusPrivate returns a new private connection to the session bus. -func SessionBusPrivate() (*Conn, error) { +func getSessionBusAddress() (string, error) { sessionEnvLck.Lock() defer sessionEnvLck.Unlock() address := os.Getenv("DBUS_SESSION_BUS_ADDRESS") if address != "" && address != "autolaunch:" { - return Dial(address) + return address, nil + } + return getSessionBusPlatformAddress() +} + +// SessionBusPrivate returns a new private connection to the session bus. +func SessionBusPrivate() (*Conn, error) { + address, err := getSessionBusAddress() + if err != nil { + return nil, err } - return sessionBusPlatform() + return Dial(address) +} + +// SessionBusPrivateHandler returns a new private connection to the session bus, using the provided handlers. +func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) { + address, err := getSessionBusAddress() + if err != nil { + return nil, err + } + return DialHandler(address, handler, signalHandler) } // SystemBus returns a shared connection to the system bus, connecting to it if @@ -133,11 +146,12 @@ func SystemBus() (conn *Conn, err error) { // SystemBusPrivate returns a new private connection to the system bus. 
func SystemBusPrivate() (*Conn, error) { - address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") - if address != "" { - return Dial(address) - } - return Dial(defaultSystemBusAddress) + return Dial(getSystemBusPlatformAddress()) +} + +// SystemBusPrivateHandler returns a new private connection to the system bus, using the provided handlers. +func SystemBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) { + return DialHandler(getSystemBusPlatformAddress(), handler, signalHandler) } // Dial establishes a new private connection to the message bus specified by address. @@ -146,21 +160,36 @@ func Dial(address string) (*Conn, error) { if err != nil { return nil, err } - return newConn(tr) + return newConn(tr, NewDefaultHandler(), NewDefaultSignalHandler()) +} + +// DialHandler establishes a new private connection to the message bus specified by address, using the supplied handlers. +func DialHandler(address string, handler Handler, signalHandler SignalHandler) (*Conn, error) { + tr, err := getTransport(address) + if err != nil { + return nil, err + } + return newConn(tr, handler, signalHandler) } // NewConn creates a new private *Conn from an already established connection. func NewConn(conn io.ReadWriteCloser) (*Conn, error) { - return newConn(genericTransport{conn}) + return NewConnHandler(conn, NewDefaultHandler(), NewDefaultSignalHandler()) +} + +// NewConnHandler creates a new private *Conn from an already established connection, using the supplied handlers. +func NewConnHandler(conn io.ReadWriteCloser, handler Handler, signalHandler SignalHandler) (*Conn, error) { + return newConn(genericTransport{conn}, handler, signalHandler) } // newConn creates a new *Conn from a transport. -func newConn(tr transport) (*Conn, error) { +func newConn(tr transport, handler Handler, signalHandler SignalHandler) (*Conn, error) { conn := new(Conn) conn.transport = tr conn.calls = make(map[uint32]*Call) conn.out = make(chan *Message, 10) - conn.handlers = make(map[ObjectPath]map[string]exportedObj) + conn.handler = handler + conn.signalHandler = signalHandler conn.nextSerial = 1 conn.serialUsed = map[uint32]bool{0: true} conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") @@ -188,16 +217,21 @@ func (conn *Conn) Close() error { close(conn.out) conn.closed = true conn.outLck.Unlock() - conn.signalsLck.Lock() - for _, ch := range conn.signals { - close(ch) + + if term, ok := conn.signalHandler.(Terminator); ok { + term.Terminate() } - conn.signalsLck.Unlock() + + if term, ok := conn.handler.(Terminator); ok { + term.Terminate() + } + conn.eavesdroppedLck.Lock() if conn.eavesdropped != nil { close(conn.eavesdropped) } conn.eavesdroppedLck.Unlock() + return conn.transport.Close() } @@ -334,17 +368,7 @@ func (conn *Conn) inWorker() { conn.namesLck.Unlock() } } - signal := &Signal{ - Sender: sender, - Path: msg.Headers[FieldPath].value.(ObjectPath), - Name: iface + "." + member, - Body: msg.Body, - } - conn.signalsLck.Lock() - for _, ch := range conn.signals { - ch <- signal - } - conn.signalsLck.Unlock() + conn.handleSignal(msg) case TypeMethodCall: go conn.handleCall(msg) } @@ -365,6 +389,21 @@ func (conn *Conn) inWorker() { } } +func (conn *Conn) handleSignal(msg *Message) { + iface := msg.Headers[FieldInterface].value.(string) + member := msg.Headers[FieldMember].value.(string) + // as per http://dbus.freedesktop.org/doc/dbus-specification.html , + // sender is optional for signals. 
+ sender, _ := msg.Headers[FieldSender].value.(string) + signal := &Signal{ + Sender: sender, + Path: msg.Headers[FieldPath].value.(ObjectPath), + Name: iface + "." + member, + Body: msg.Body, + } + conn.signalHandler.DeliverSignal(iface, member, signal) +} + // Names returns the list of all names that are currently owned by this // connection. The slice is always at least one element long, the first element // being the unique name of the connection. @@ -455,7 +494,19 @@ func (conn *Conn) Send(msg *Message, ch chan *Call) *Call { // sendError creates an error message corresponding to the parameters and sends // it to conn.out. -func (conn *Conn) sendError(e Error, dest string, serial uint32) { +func (conn *Conn) sendError(err error, dest string, serial uint32) { + var e *Error + switch em := err.(type) { + case Error: + e = &em + case *Error: + e = em + case DBusError: + name, body := em.DBusError() + e = NewError(name, body) + default: + e = MakeFailedError(err) + } msg := new(Message) msg.Type = TypeError msg.serial = conn.getSerial() @@ -498,6 +549,14 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { conn.outLck.RUnlock() } +func (conn *Conn) defaultSignalAction(fn func(h *defaultSignalHandler, ch chan<- *Signal), ch chan<- *Signal) { + if !isDefaultSignalHandler(conn.signalHandler) { + return + } + handler := conn.signalHandler.(*defaultSignalHandler) + fn(handler, ch) +} + // Signal registers the given channel to be passed all received signal messages. // The caller has to make sure that ch is sufficiently buffered; if a message // arrives when a write to c is not possible, it is discarded. @@ -508,22 +567,12 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { // channel for eavesdropped messages, this channel receives all signals, and // none of the channels passed to Signal will receive any signals. func (conn *Conn) Signal(ch chan<- *Signal) { - conn.signalsLck.Lock() - conn.signals = append(conn.signals, ch) - conn.signalsLck.Unlock() + conn.defaultSignalAction((*defaultSignalHandler).addSignal, ch) } // RemoveSignal removes the given channel from the list of the registered channels. 
func (conn *Conn) RemoveSignal(ch chan<- *Signal) { - conn.signalsLck.Lock() - for i := len(conn.signals) - 1; i >= 0; i-- { - if ch == conn.signals[i] { - copy(conn.signals[i:], conn.signals[i+1:]) - conn.signals[len(conn.signals)-1] = nil - conn.signals = conn.signals[:len(conn.signals)-1] - } - } - conn.signalsLck.Unlock() + conn.defaultSignalAction((*defaultSignalHandler).removeSignal, ch) } // SupportsUnixFDs returns whether the underlying transport supports passing of diff --git a/vendor/github.com/godbus/dbus/conn_darwin.go b/vendor/github.com/godbus/dbus/conn_darwin.go index b67bb1b8..c015f80c 100644 --- a/vendor/github.com/godbus/dbus/conn_darwin.go +++ b/vendor/github.com/godbus/dbus/conn_darwin.go @@ -2,20 +2,32 @@ package dbus import ( "errors" + "fmt" + "os" "os/exec" ) -func sessionBusPlatform() (*Conn, error) { +const defaultSystemBusAddress = "unix:path=/opt/local/var/run/dbus/system_bus_socket" + +func getSessionBusPlatformAddress() (string, error) { cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET") b, err := cmd.CombinedOutput() if err != nil { - return nil, err + return "", err } if len(b) == 0 { - return nil, errors.New("dbus: couldn't determine address of session bus") + return "", errors.New("dbus: couldn't determine address of session bus") } - return Dial("unix:path=" + string(b[:len(b)-1])) + return "unix:path=" + string(b[:len(b)-1]), nil +} + +func getSystemBusPlatformAddress() string { + address := os.Getenv("DBUS_LAUNCHD_SESSION_BUS_SOCKET") + if address != "" { + return fmt.Sprintf("unix:path=%s", address) + } + return defaultSystemBusAddress } diff --git a/vendor/github.com/godbus/dbus/conn_other.go b/vendor/github.com/godbus/dbus/conn_other.go index 289e8c5d..254c9f2e 100644 --- a/vendor/github.com/godbus/dbus/conn_other.go +++ b/vendor/github.com/godbus/dbus/conn_other.go @@ -5,27 +5,38 @@ package dbus import ( "bytes" "errors" + "fmt" "os" "os/exec" ) -func sessionBusPlatform() (*Conn, error) { +const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket" + +func getSessionBusPlatformAddress() (string, error) { cmd := exec.Command("dbus-launch") b, err := cmd.CombinedOutput() if err != nil { - return nil, err + return "", err } i := bytes.IndexByte(b, '=') j := bytes.IndexByte(b, '\n') if i == -1 || j == -1 { - return nil, errors.New("dbus: couldn't determine address of session bus") + return "", errors.New("dbus: couldn't determine address of session bus") } env, addr := string(b[0:i]), string(b[i+1:j]) os.Setenv(env, addr) - return Dial(addr) + return addr, nil +} + +func getSystemBusPlatformAddress() string { + address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") + if address != "" { + return fmt.Sprintf("unix:path=%s", address) + } + return defaultSystemBusAddress } diff --git a/vendor/github.com/godbus/dbus/dbus.go b/vendor/github.com/godbus/dbus/dbus.go index 2ce68735..c6d0d3ce 100644 --- a/vendor/github.com/godbus/dbus/dbus.go +++ b/vendor/github.com/godbus/dbus/dbus.go @@ -2,6 +2,7 @@ package dbus import ( "errors" + "fmt" "reflect" "strings" ) @@ -12,6 +13,8 @@ var ( uint8Type = reflect.TypeOf(uint8(0)) int16Type = reflect.TypeOf(int16(0)) uint16Type = reflect.TypeOf(uint16(0)) + intType = reflect.TypeOf(int(0)) + uintType = reflect.TypeOf(uint(0)) int32Type = reflect.TypeOf(int32(0)) uint32Type = reflect.TypeOf(uint32(0)) int64Type = reflect.TypeOf(int64(0)) @@ -22,6 +25,7 @@ var ( objectPathType = reflect.TypeOf(ObjectPath("")) variantType = reflect.TypeOf(Variant{Signature{""}, nil}) interfacesType = 
reflect.TypeOf([]interface{}{}) + interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() unixFDType = reflect.TypeOf(UnixFD(0)) unixFDIndexType = reflect.TypeOf(UnixFDIndex(0)) ) @@ -46,86 +50,251 @@ func Store(src []interface{}, dest ...interface{}) error { } for i := range src { - if err := store(src[i], dest[i]); err != nil { + if err := storeInterfaces(src[i], dest[i]); err != nil { return err } } return nil } -func store(src, dest interface{}) error { - if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) { - reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src)) - return nil - } else if hasStruct(dest) { - rv := reflect.ValueOf(dest).Elem() - switch rv.Kind() { - case reflect.Struct: - vs, ok := src.([]interface{}) - if !ok { - return errors.New("dbus.Store: type mismatch") - } - t := rv.Type() - ndest := make([]interface{}, 0, rv.NumField()) - for i := 0; i < rv.NumField(); i++ { - field := t.Field(i) - if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { - ndest = append(ndest, rv.Field(i).Addr().Interface()) - } - } - if len(vs) != len(ndest) { - return errors.New("dbus.Store: type mismatch") - } - err := Store(vs, ndest...) - if err != nil { - return errors.New("dbus.Store: type mismatch") - } - case reflect.Slice: - sv := reflect.ValueOf(src) - if sv.Kind() != reflect.Slice { - return errors.New("dbus.Store: type mismatch") - } - rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len())) - for i := 0; i < sv.Len(); i++ { - if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil { - return err - } - } - case reflect.Map: - sv := reflect.ValueOf(src) - if sv.Kind() != reflect.Map { - return errors.New("dbus.Store: type mismatch") - } - keys := sv.MapKeys() - rv.Set(reflect.MakeMap(sv.Type())) - for _, key := range keys { - v := reflect.New(sv.Type().Elem()) - if err := store(v, sv.MapIndex(key).Interface()); err != nil { - return err - } - rv.SetMapIndex(key, v.Elem()) - } - default: - return errors.New("dbus.Store: type mismatch") - } - return nil - } else { - return errors.New("dbus.Store: type mismatch") +func storeInterfaces(src, dest interface{}) error { + return store(reflect.ValueOf(dest), reflect.ValueOf(src)) +} + +func store(dest, src reflect.Value) error { + if dest.Kind() == reflect.Ptr { + return store(dest.Elem(), src) + } + switch src.Kind() { + case reflect.Slice: + return storeSlice(dest, src) + case reflect.Map: + return storeMap(dest, src) + default: + return storeBase(dest, src) } } -func hasStruct(v interface{}) bool { - t := reflect.TypeOf(v) - for { - switch t.Kind() { - case reflect.Struct: - return true - case reflect.Slice, reflect.Ptr, reflect.Map: - t = t.Elem() - default: - return false +func storeBase(dest, src reflect.Value) error { + return setDest(dest, src) +} + +func setDest(dest, src reflect.Value) error { + if !isVariant(src.Type()) && isVariant(dest.Type()) { + //special conversion for dbus.Variant + dest.Set(reflect.ValueOf(MakeVariant(src.Interface()))) + return nil + } + if isVariant(src.Type()) && !isVariant(dest.Type()) { + src = getVariantValue(src) + } + if !src.Type().ConvertibleTo(dest.Type()) { + return fmt.Errorf( + "dbus.Store: type mismatch: cannot convert %s to %s", + src.Type(), dest.Type()) + } + dest.Set(src.Convert(dest.Type())) + return nil +} + +func kindsAreCompatible(dest, src reflect.Type) bool { + switch { + case isVariant(dest): + return true + case dest.Kind() == reflect.Interface: + return true + default: + return dest.Kind() == src.Kind() + } +} + +func isConvertibleTo(dest, src 
reflect.Type) bool { + switch { + case isVariant(dest): + return true + case dest.Kind() == reflect.Interface: + return true + case dest.Kind() == reflect.Slice: + return src.Kind() == reflect.Slice && + isConvertibleTo(dest.Elem(), src.Elem()) + case dest.Kind() == reflect.Struct: + return src == interfacesType + default: + return src.ConvertibleTo(dest) + } +} + +func storeMap(dest, src reflect.Value) error { + switch { + case !kindsAreCompatible(dest.Type(), src.Type()): + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "map: cannot store a value of %s into %s", + src.Type(), dest.Type()) + case isVariant(dest.Type()): + return storeMapIntoVariant(dest, src) + case dest.Kind() == reflect.Interface: + return storeMapIntoInterface(dest, src) + case isConvertibleTo(dest.Type().Key(), src.Type().Key()) && + isConvertibleTo(dest.Type().Elem(), src.Type().Elem()): + return storeMapIntoMap(dest, src) + default: + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "map: cannot convert a value of %s into %s", + src.Type(), dest.Type()) + } +} + +func storeMapIntoVariant(dest, src reflect.Value) error { + dv := reflect.MakeMap(src.Type()) + err := store(dv, src) + if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeMapIntoInterface(dest, src reflect.Value) error { + var dv reflect.Value + if isVariant(src.Type().Elem()) { + //Convert variants to interface{} recursively when converting + //to interface{} + dv = reflect.MakeMap( + reflect.MapOf(src.Type().Key(), interfaceType)) + } else { + dv = reflect.MakeMap(src.Type()) + } + err := store(dv, src) + if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeMapIntoMap(dest, src reflect.Value) error { + if dest.IsNil() { + dest.Set(reflect.MakeMap(dest.Type())) + } + keys := src.MapKeys() + for _, key := range keys { + dkey := key.Convert(dest.Type().Key()) + dval := reflect.New(dest.Type().Elem()).Elem() + err := store(dval, getVariantValue(src.MapIndex(key))) + if err != nil { + return err + } + dest.SetMapIndex(dkey, dval) + } + return nil +} + +func storeSlice(dest, src reflect.Value) error { + switch { + case src.Type() == interfacesType && dest.Kind() == reflect.Struct: + //The decoder always decodes structs as slices of interface{} + return storeStruct(dest, src) + case !kindsAreCompatible(dest.Type(), src.Type()): + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "slice: cannot store a value of %s into %s", + src.Type(), dest.Type()) + case isVariant(dest.Type()): + return storeSliceIntoVariant(dest, src) + case dest.Kind() == reflect.Interface: + return storeSliceIntoInterface(dest, src) + case isConvertibleTo(dest.Type().Elem(), src.Type().Elem()): + return storeSliceIntoSlice(dest, src) + default: + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "slice: cannot convert a value of %s into %s", + src.Type(), dest.Type()) + } +} + +func storeStruct(dest, src reflect.Value) error { + if isVariant(dest.Type()) { + return storeBase(dest, src) + } + dval := make([]interface{}, 0, dest.NumField()) + dtype := dest.Type() + for i := 0; i < dest.NumField(); i++ { + field := dest.Field(i) + ftype := dtype.Field(i) + if ftype.PkgPath != "" { + continue + } + if ftype.Tag.Get("dbus") == "-" { + continue + } + dval = append(dval, field.Addr().Interface()) + } + if src.Len() != len(dval) { + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "destination struct does not have "+ + "enough fields need: %d have: %d", + src.Len(), len(dval)) + } + return 
Store(src.Interface().([]interface{}), dval...) +} + +func storeSliceIntoVariant(dest, src reflect.Value) error { + dv := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + err := store(dv, src) + if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeSliceIntoInterface(dest, src reflect.Value) error { + var dv reflect.Value + if isVariant(src.Type().Elem()) { + //Convert variants to interface{} recursively when converting + //to interface{} + dv = reflect.MakeSlice(reflect.SliceOf(interfaceType), + src.Len(), src.Cap()) + } else { + dv = reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + } + err := store(dv, src) + if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeSliceIntoSlice(dest, src reflect.Value) error { + if dest.IsNil() || dest.Len() < src.Len() { + dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap())) + } + if dest.Len() != src.Len() { + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "slices are different lengths "+ + "need: %d have: %d", + src.Len(), dest.Len()) + } + for i := 0; i < src.Len(); i++ { + err := store(dest.Index(i), getVariantValue(src.Index(i))) + if err != nil { + return err } } + return nil +} + +func getVariantValue(in reflect.Value) reflect.Value { + if isVariant(in.Type()) { + return reflect.ValueOf(in.Interface().(Variant).Value()) + } + return in +} + +func isVariant(t reflect.Type) bool { + return t == variantType } // An ObjectPath is an object path as defined by the D-Bus spec. @@ -177,15 +346,15 @@ func alignment(t reflect.Type) int { return 4 case signatureType: return 1 - case interfacesType: // sometimes used for structs - return 8 + case interfacesType: + return 4 } switch t.Kind() { case reflect.Uint8: return 1 case reflect.Uint16, reflect.Int16: return 2 - case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map: + case reflect.Uint, reflect.Int, reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map: return 4 case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct: return 8 @@ -200,7 +369,7 @@ func isKeyType(t reflect.Type) bool { switch t.Kind() { case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64, - reflect.String: + reflect.String, reflect.Uint, reflect.Int: return true } diff --git a/vendor/github.com/godbus/dbus/default_handler.go b/vendor/github.com/godbus/dbus/default_handler.go new file mode 100644 index 00000000..e81f73ac --- /dev/null +++ b/vendor/github.com/godbus/dbus/default_handler.go @@ -0,0 +1,291 @@ +package dbus + +import ( + "bytes" + "reflect" + "strings" + "sync" +) + +func newIntrospectIntf(h *defaultHandler) *exportedIntf { + methods := make(map[string]Method) + methods["Introspect"] = exportedMethod{ + reflect.ValueOf(func(msg Message) (string, *Error) { + path := msg.Headers[FieldPath].value.(ObjectPath) + return h.introspectPath(path), nil + }), + } + return newExportedIntf(methods, true) +} + +//NewDefaultHandler returns an instance of the default +//call handler. This is useful if you want to implement only +//one of the two handlers but not both. 
+func NewDefaultHandler() *defaultHandler { + h := &defaultHandler{ + objects: make(map[ObjectPath]*exportedObj), + defaultIntf: make(map[string]*exportedIntf), + } + h.defaultIntf["org.freedesktop.DBus.Introspectable"] = newIntrospectIntf(h) + return h +} + +type defaultHandler struct { + sync.RWMutex + objects map[ObjectPath]*exportedObj + defaultIntf map[string]*exportedIntf +} + +func (h *defaultHandler) PathExists(path ObjectPath) bool { + _, ok := h.objects[path] + return ok +} + +func (h *defaultHandler) introspectPath(path ObjectPath) string { + subpath := make(map[string]struct{}) + var xml bytes.Buffer + xml.WriteString("<node>") + for obj, _ := range h.objects { + p := string(path) + if p != "/" { + p += "/" + } + if strings.HasPrefix(string(obj), p) { + node_name := strings.Split(string(obj[len(p):]), "/")[0] + subpath[node_name] = struct{}{} + } + } + for s, _ := range subpath { + xml.WriteString("\n\t<node name=\"" + s + "\"/>") + } + xml.WriteString("\n</node>") + return xml.String() +} + +func (h *defaultHandler) LookupObject(path ObjectPath) (ServerObject, bool) { + h.RLock() + defer h.RUnlock() + object, ok := h.objects[path] + if ok { + return object, ok + } + + // If an object wasn't found for this exact path, + // look for a matching subtree registration + subtreeObject := newExportedObject() + path = path[:strings.LastIndex(string(path), "/")] + for len(path) > 0 { + object, ok = h.objects[path] + if ok { + for name, iface := range object.interfaces { + // Only include this handler if it registered for the subtree + if iface.isFallbackInterface() { + subtreeObject.interfaces[name] = iface + } + } + break + } + + path = path[:strings.LastIndex(string(path), "/")] + } + + for name, intf := range h.defaultIntf { + if _, exists := subtreeObject.interfaces[name]; exists { + continue + } + subtreeObject.interfaces[name] = intf + } + + return subtreeObject, true +} + +func (h *defaultHandler) AddObject(path ObjectPath, object *exportedObj) { + h.Lock() + h.objects[path] = object + h.Unlock() +} + +func (h *defaultHandler) DeleteObject(path ObjectPath) { + h.Lock() + delete(h.objects, path) + h.Unlock() +} + +type exportedMethod struct { + reflect.Value +} + +func (m exportedMethod) Call(args ...interface{}) ([]interface{}, error) { + t := m.Type() + + params := make([]reflect.Value, len(args)) + for i := 0; i < len(args); i++ { + params[i] = reflect.ValueOf(args[i]).Elem() + } + + ret := m.Value.Call(params) + + err := ret[t.NumOut()-1].Interface().(*Error) + ret = ret[:t.NumOut()-1] + out := make([]interface{}, len(ret)) + for i, val := range ret { + out[i] = val.Interface() + } + if err == nil { + //concrete type to interface nil is a special case + return out, nil + } + return out, err +} + +func (m exportedMethod) NumArguments() int { + return m.Value.Type().NumIn() +} + +func (m exportedMethod) ArgumentValue(i int) interface{} { + return reflect.Zero(m.Type().In(i)).Interface() +} + +func (m exportedMethod) NumReturns() int { + return m.Value.Type().NumOut() +} + +func (m exportedMethod) ReturnValue(i int) interface{} { + return reflect.Zero(m.Type().Out(i)).Interface() +} + +func newExportedObject() *exportedObj { + return &exportedObj{ + interfaces: make(map[string]*exportedIntf), + } +} + +type exportedObj struct { + interfaces map[string]*exportedIntf +} + +func (obj *exportedObj) LookupInterface(name string) (Interface, bool) { + if name == "" { + return obj, true + } + intf, exists := obj.interfaces[name] + return intf, exists +} + +func (obj *exportedObj) AddInterface(name string, iface *exportedIntf)
{ + obj.interfaces[name] = iface +} + +func (obj *exportedObj) DeleteInterface(name string) { + delete(obj.interfaces, name) +} + +func (obj *exportedObj) LookupMethod(name string) (Method, bool) { + for _, intf := range obj.interfaces { + method, exists := intf.LookupMethod(name) + if exists { + return method, exists + } + } + return nil, false +} + +func (obj *exportedObj) isFallbackInterface() bool { + return false +} + +func newExportedIntf(methods map[string]Method, includeSubtree bool) *exportedIntf { + return &exportedIntf{ + methods: methods, + includeSubtree: includeSubtree, + } +} + +type exportedIntf struct { + methods map[string]Method + + // Whether or not this export is for the entire subtree + includeSubtree bool +} + +func (obj *exportedIntf) LookupMethod(name string) (Method, bool) { + out, exists := obj.methods[name] + return out, exists +} + +func (obj *exportedIntf) isFallbackInterface() bool { + return obj.includeSubtree +} + +//NewDefaultSignalHandler returns an instance of the default +//signal handler. This is useful if you want to implement only +//one of the two handlers but not both. +func NewDefaultSignalHandler() *defaultSignalHandler { + return &defaultSignalHandler{} +} + +func isDefaultSignalHandler(handler SignalHandler) bool { + _, ok := handler.(*defaultSignalHandler) + return ok +} + +type defaultSignalHandler struct { + sync.RWMutex + closed bool + signals []chan<- *Signal +} + +func (sh *defaultSignalHandler) DeliverSignal(intf, name string, signal *Signal) { + go func() { + sh.RLock() + defer sh.RUnlock() + if sh.closed { + return + } + for _, ch := range sh.signals { + ch <- signal + } + }() +} + +func (sh *defaultSignalHandler) Init() error { + sh.Lock() + sh.signals = make([]chan<- *Signal, 0) + sh.Unlock() + return nil +} + +func (sh *defaultSignalHandler) Terminate() { + sh.Lock() + sh.closed = true + for _, ch := range sh.signals { + close(ch) + } + sh.signals = nil + sh.Unlock() +} + +func (sh *defaultSignalHandler) addSignal(ch chan<- *Signal) { + sh.Lock() + defer sh.Unlock() + if sh.closed { + return + } + sh.signals = append(sh.signals, ch) + +} + +func (sh *defaultSignalHandler) removeSignal(ch chan<- *Signal) { + sh.Lock() + defer sh.Unlock() + if sh.closed { + return + } + for i := len(sh.signals) - 1; i >= 0; i-- { + if ch == sh.signals[i] { + copy(sh.signals[i:], sh.signals[i+1:]) + sh.signals[len(sh.signals)-1] = nil + sh.signals = sh.signals[:len(sh.signals)-1] + } + } +} diff --git a/vendor/github.com/godbus/dbus/doc.go b/vendor/github.com/godbus/dbus/doc.go index deff554a..895036a8 100644 --- a/vendor/github.com/godbus/dbus/doc.go +++ b/vendor/github.com/godbus/dbus/doc.go @@ -19,6 +19,8 @@ respective D-Bus equivalents: bool | BOOLEAN int16 | INT16 uint16 | UINT16 + int | INT32 + uint | UINT32 int32 | INT32 uint32 | UINT32 int64 | INT64 @@ -28,6 +30,7 @@ respective D-Bus equivalents: ObjectPath | OBJECT_PATH Signature | SIGNATURE Variant | VARIANT + interface{} | VARIANT UnixFDIndex | UNIX_FD Slices and arrays encode as ARRAYs of their element type. @@ -41,6 +44,9 @@ be skipped. Pointers encode as the value they're pointed to. +Types convertible to one of the base types above will be mapped as the +base type. + Trying to encode any other type or a slice, map or struct containing an unsupported type will result in an InvalidTypeError. 
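The doc.go table above, together with the sig.go and encoder.go changes elsewhere in this diff, means that plain `int` and `uint` values now marshal as INT32/UINT32 and that `interface{}` values are wrapped as VARIANT. Below is a minimal illustrative sketch of the resulting signatures; it is not part of the diff itself, and it assumes the vendored `github.com/godbus/dbus` package with its exported `SignatureOf` and `MakeVariant` helpers:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// Plain int/uint now map to the fixed-width D-Bus types.
	fmt.Println(dbus.SignatureOf(int(42)))  // "i" (INT32)
	fmt.Println(dbus.SignatureOf(uint(42))) // "u" (UINT32)

	// interface{} is treated as VARIANT, so map[string]interface{}
	// yields "a{sv}", the usual shape for D-Bus property bags.
	fmt.Println(dbus.SignatureOf(map[string]interface{}{}))

	// Explicit variants keep their "v" signature as before.
	fmt.Println(dbus.SignatureOf(dbus.MakeVariant("hello")))
}
```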
diff --git a/vendor/github.com/godbus/dbus/encoder.go b/vendor/github.com/godbus/dbus/encoder.go index 9f0a9e89..8bb71776 100644 --- a/vendor/github.com/godbus/dbus/encoder.go +++ b/vendor/github.com/godbus/dbus/encoder.go @@ -96,10 +96,10 @@ func (enc *encoder) encode(v reflect.Value, depth int) { case reflect.Uint16: enc.binwrite(uint16(v.Uint())) enc.pos += 2 - case reflect.Int32: + case reflect.Int, reflect.Int32: enc.binwrite(int32(v.Int())) enc.pos += 4 - case reflect.Uint32: + case reflect.Uint, reflect.Uint32: enc.binwrite(uint32(v.Uint())) enc.pos += 4 case reflect.Int64: @@ -202,6 +202,8 @@ func (enc *encoder) encode(v reflect.Value, depth int) { panic(err) } enc.pos += length + case reflect.Interface: + enc.encode(reflect.ValueOf(MakeVariant(v.Interface())), depth) default: panic(InvalidTypeError{v.Type()}) } diff --git a/vendor/github.com/godbus/dbus/export.go b/vendor/github.com/godbus/dbus/export.go index 6c335220..aae97088 100644 --- a/vendor/github.com/godbus/dbus/export.go +++ b/vendor/github.com/godbus/dbus/export.go @@ -1,7 +1,6 @@ package dbus import ( - "bytes" "errors" "fmt" "reflect" @@ -9,32 +8,29 @@ import ( ) var ( - errmsgInvalidArg = Error{ + ErrMsgInvalidArg = Error{ "org.freedesktop.DBus.Error.InvalidArgs", []interface{}{"Invalid type / number of args"}, } - errmsgNoObject = Error{ + ErrMsgNoObject = Error{ "org.freedesktop.DBus.Error.NoSuchObject", []interface{}{"No such object"}, } - errmsgUnknownMethod = Error{ + ErrMsgUnknownMethod = Error{ "org.freedesktop.DBus.Error.UnknownMethod", []interface{}{"Unknown / invalid method"}, } + ErrMsgUnknownInterface = Error{ + "org.freedesktop.DBus.Error.UnknownInterface", + []interface{}{"Object does not implement the interface"}, + } ) -// exportedObj represents an exported object. It stores a precomputed -// method table that represents the methods exported on the bus. -type exportedObj struct { - methods map[string]reflect.Value - - // Whether or not this export is for the entire subtree - includeSubtree bool -} - -func (obj exportedObj) Method(name string) (reflect.Value, bool) { - out, exists := obj.methods[name] - return out, exists +func MakeFailedError(err error) *Error { + return &Error{ + "org.freedesktop.DBus.Error.Failed", + []interface{}{err.Error()}, + } } // Sender is a type which can be used in exported methods to receive the message @@ -63,7 +59,7 @@ func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Va // only track valid methods must return *Error as last arg // and must be exported if t.NumOut() == 0 || - t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) || + t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) || methtype.PkgPath != "" { continue } @@ -73,119 +69,12 @@ func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Va return methods } -// searchHandlers will look through all registered handlers looking for one -// to handle the given path. If a verbatim one isn't found, it will check for -// a subtree registration for the path as well. 
-func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportedObj, bool) { - conn.handlersLck.RLock() - defer conn.handlersLck.RUnlock() +func standardMethodArgumentDecode(m Method, sender string, msg *Message, body []interface{}) ([]interface{}, error) { + pointers := make([]interface{}, m.NumArguments()) + decode := make([]interface{}, 0, len(body)) - handlers, ok := conn.handlers[path] - if ok { - return handlers, ok - } - - // If handlers weren't found for this exact path, look for a matching subtree - // registration - handlers = make(map[string]exportedObj) - path = path[:strings.LastIndex(string(path), "/")] - for len(path) > 0 { - var subtreeHandlers map[string]exportedObj - subtreeHandlers, ok = conn.handlers[path] - if ok { - for iface, handler := range subtreeHandlers { - // Only include this handler if it registered for the subtree - if handler.includeSubtree { - handlers[iface] = handler - } - } - - break - } - - path = path[:strings.LastIndex(string(path), "/")] - } - - return handlers, ok -} - -// handleCall handles the given method call (i.e. looks if it's one of the -// pre-implemented ones and searches for a corresponding handler if not). -func (conn *Conn) handleCall(msg *Message) { - name := msg.Headers[FieldMember].value.(string) - path := msg.Headers[FieldPath].value.(ObjectPath) - ifaceName, hasIface := msg.Headers[FieldInterface].value.(string) - sender, hasSender := msg.Headers[FieldSender].value.(string) - serial := msg.serial - if ifaceName == "org.freedesktop.DBus.Peer" { - switch name { - case "Ping": - conn.sendReply(sender, serial) - case "GetMachineId": - conn.sendReply(sender, serial, conn.uuid) - default: - conn.sendError(errmsgUnknownMethod, sender, serial) - } - return - } else if ifaceName == "org.freedesktop.DBus.Introspectable" && name == "Introspect" { - if _, ok := conn.handlers[path]; !ok { - subpath := make(map[string]struct{}) - var xml bytes.Buffer - xml.WriteString("<node>") - for h, _ := range conn.handlers { - p := string(path) - if p != "/" { - p += "/" - } - if strings.HasPrefix(string(h), p) { - node_name := strings.Split(string(h[len(p):]), "/")[0] - subpath[node_name] = struct{}{} - } - } - for s, _ := range subpath { - xml.WriteString("\n\t<node name=\"" + s + "\"/>") - } - xml.WriteString("\n</node>") - conn.sendReply(sender, serial, xml.String()) - return - } - } - if len(name) == 0 { - conn.sendError(errmsgUnknownMethod, sender, serial) - } - - // Find the exported handler (if any) for this path - handlers, ok := conn.searchHandlers(path) - if !ok { - conn.sendError(errmsgNoObject, sender, serial) - return - } - - var m reflect.Value - var exists bool - if hasIface { - iface := handlers[ifaceName] - m, exists = iface.Method(name) - } else { - for _, v := range handlers { - m, exists = v.Method(name) - if exists { - break - } - } - } - - if !exists { - conn.sendError(errmsgUnknownMethod, sender, serial) - return - } - - t := m.Type() - vs := msg.Body - pointers := make([]interface{}, t.NumIn()) - decode := make([]interface{}, 0, len(vs)) - for i := 0; i < t.NumIn(); i++ { - tp := t.In(i) + for i := 0; i < m.NumArguments(); i++ { + tp := reflect.TypeOf(m.ArgumentValue(i)) val := reflect.New(tp) pointers[i] = val.Interface() if tp == reflect.TypeOf((*Sender)(nil)).Elem() { @@ -197,26 +86,73 @@ func (conn *Conn) handleCall(msg *Message) { } } - if len(decode) != len(vs) { - conn.sendError(errmsgInvalidArg, sender, serial) + if len(decode) != len(body) { + return nil, ErrMsgInvalidArg + } + + if err := Store(body, decode...); err != nil { + return nil,
ErrMsgInvalidArg + } + + return pointers, nil +} + +func (conn *Conn) decodeArguments(m Method, sender string, msg *Message) ([]interface{}, error) { + if decoder, ok := m.(ArgumentDecoder); ok { + return decoder.DecodeArguments(conn, sender, msg, msg.Body) + } + return standardMethodArgumentDecode(m, sender, msg, msg.Body) +} + +// handleCall handles the given method call (i.e. looks if it's one of the +// pre-implemented ones and searches for a corresponding handler if not). +func (conn *Conn) handleCall(msg *Message) { + name := msg.Headers[FieldMember].value.(string) + path := msg.Headers[FieldPath].value.(ObjectPath) + ifaceName, _ := msg.Headers[FieldInterface].value.(string) + sender, hasSender := msg.Headers[FieldSender].value.(string) + serial := msg.serial + if ifaceName == "org.freedesktop.DBus.Peer" { + switch name { + case "Ping": + conn.sendReply(sender, serial) + case "GetMachineId": + conn.sendReply(sender, serial, conn.uuid) + default: + conn.sendError(ErrMsgUnknownMethod, sender, serial) + } + return + } + if len(name) == 0 { + conn.sendError(ErrMsgUnknownMethod, sender, serial) + } + + object, ok := conn.handler.LookupObject(path) + if !ok { + conn.sendError(ErrMsgNoObject, sender, serial) return } - if err := Store(vs, decode...); err != nil { - conn.sendError(errmsgInvalidArg, sender, serial) + iface, exists := object.LookupInterface(ifaceName) + if !exists { + conn.sendError(ErrMsgUnknownInterface, sender, serial) return } - // Extract parameters - params := make([]reflect.Value, len(pointers)) - for i := 0; i < len(pointers); i++ { - params[i] = reflect.ValueOf(pointers[i]).Elem() + m, exists := iface.LookupMethod(name) + if !exists { + conn.sendError(ErrMsgUnknownMethod, sender, serial) + return + } + args, err := conn.decodeArguments(m, sender, msg) + if err != nil { + conn.sendError(err, sender, serial) + return } - // Call method - ret := m.Call(params) - if em := ret[t.NumOut()-1].Interface().(*Error); em != nil { - conn.sendError(*em, sender, serial) + ret, err := m.Call(args...) 
+ if err != nil { + conn.sendError(err, sender, serial) return } @@ -229,13 +165,11 @@ func (conn *Conn) handleCall(msg *Message) { reply.Headers[FieldDestination] = msg.Headers[FieldSender] } reply.Headers[FieldReplySerial] = MakeVariant(msg.serial) - reply.Body = make([]interface{}, len(ret)-1) - for i := 0; i < len(ret)-1; i++ { - reply.Body[i] = ret[i].Interface() - } - if len(ret) != 1 { - reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) + reply.Body = make([]interface{}, len(ret)) + for i := 0; i < len(ret); i++ { + reply.Body[i] = ret[i] } + reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) conn.outLck.RLock() if !conn.closed { conn.out <- reply @@ -375,7 +309,7 @@ func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectP t := rval.Type() // only track valid methods must return *Error as last arg if t.NumOut() == 0 || - t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) { + t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) { continue } out[name] = rval @@ -383,38 +317,49 @@ func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectP return conn.export(out, path, iface, includeSubtree) } +func (conn *Conn) unexport(h *defaultHandler, path ObjectPath, iface string) error { + if h.PathExists(path) { + obj := h.objects[path] + obj.DeleteInterface(iface) + if len(obj.interfaces) == 0 { + h.DeleteObject(path) + } + } + return nil +} + // exportWithMap is the worker function for all exports/registrations. func (conn *Conn) export(methods map[string]reflect.Value, path ObjectPath, iface string, includeSubtree bool) error { + h, ok := conn.handler.(*defaultHandler) + if !ok { + return fmt.Errorf( + `dbus: export only allowed on the default handler, have %T`, + conn.handler) + } + if !path.IsValid() { return fmt.Errorf(`dbus: Invalid path name: "%s"`, path) } - conn.handlersLck.Lock() - defer conn.handlersLck.Unlock() - // Remove a previous export if the interface is nil if methods == nil { - if _, ok := conn.handlers[path]; ok { - delete(conn.handlers[path], iface) - if len(conn.handlers[path]) == 0 { - delete(conn.handlers, path) - } - } - - return nil + return conn.unexport(h, path, iface) } // If this is the first handler for this path, make a new map to hold all // handlers for this path. - if _, ok := conn.handlers[path]; !ok { - conn.handlers[path] = make(map[string]exportedObj) + if !h.PathExists(path) { + h.AddObject(path, newExportedObject()) + } + + exportedMethods := make(map[string]Method) + for name, method := range methods { + exportedMethods[name] = exportedMethod{method} } // Finally, save this handler - conn.handlers[path][iface] = exportedObj{ - methods: methods, - includeSubtree: includeSubtree, - } + obj := h.objects[path] + obj.AddInterface(iface, newExportedIntf(exportedMethods, includeSubtree)) return nil } diff --git a/vendor/github.com/godbus/dbus/object.go b/vendor/github.com/godbus/dbus/object.go index 9573b709..6d95583d 100644 --- a/vendor/github.com/godbus/dbus/object.go +++ b/vendor/github.com/godbus/dbus/object.go @@ -43,7 +43,8 @@ func (o *Object) AddMatchSignal(iface, member string) *Call { // will be allocated. Otherwise, ch has to be buffered or Go will panic. // // If the flags include FlagNoReplyExpected, ch is ignored and a Call structure -// is returned of which only the Err member is valid. +// is returned with any error in Err and a closed channel in Done containing +// the returned Call as its one entry. 
 //
 // If the method parameter contains a dot ('.'), the part before the last dot
 // specifies the interface on which the method is called.
@@ -97,11 +98,21 @@ func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface
 	}
 	o.conn.outLck.RLock()
 	defer o.conn.outLck.RUnlock()
+	done := make(chan *Call, 1)
+	call := &Call{
+		Err:  nil,
+		Done: done,
+	}
+	defer func() {
+		call.Done <- call
+		close(done)
+	}()
 	if o.conn.closed {
-		return &Call{Err: ErrClosed}
+		call.Err = ErrClosed
+		return call
 	}
 	o.conn.out <- msg
-	return &Call{Err: nil}
+	return call
 }
 
 // GetProperty calls org.freedesktop.DBus.Properties.GetProperty on the given
@@ -125,12 +136,12 @@ func (o *Object) GetProperty(p string) (Variant, error) {
 	return result, nil
 }
 
-// Destination returns the destination that calls on o are sent to.
+// Destination returns the destination that calls on (o *Object) are sent to.
 func (o *Object) Destination() string {
 	return o.dest
 }
 
-// Path returns the path that calls on o are sent to.
+// Path returns the path that calls on (o *Object) are sent to.
 func (o *Object) Path() ObjectPath {
 	return o.path
 }
diff --git a/vendor/github.com/godbus/dbus/server_interfaces.go b/vendor/github.com/godbus/dbus/server_interfaces.go
new file mode 100644
index 00000000..091948ae
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/server_interfaces.go
@@ -0,0 +1,89 @@
+package dbus
+
+// Terminator allows a handler to implement a shutdown mechanism that
+// is called when the connection terminates.
+type Terminator interface {
+	Terminate()
+}
+
+// Handler is the representation of a D-Bus Application.
+//
+// The Handler must have a way to look up objects given
+// an ObjectPath. The returned object must implement the
+// ServerObject interface.
+type Handler interface {
+	LookupObject(path ObjectPath) (ServerObject, bool)
+}
+
+// ServerObject is the representation of a D-Bus Object.
+//
+// Objects are registered at a path for a given Handler.
+// The Objects implement D-Bus interfaces. The semantics
+// of Interface lookup are up to the implementation of
+// the ServerObject. The ServerObject implementation may
+// choose to implement the empty string as a valid interface
+// representing all methods, or not, per the D-Bus specification.
+type ServerObject interface {
+	LookupInterface(name string) (Interface, bool)
+}
+
+// An Interface is the representation of a D-Bus Interface.
+//
+// Interfaces are a grouping of methods implemented by the Objects.
+// Interfaces are responsible for routing method calls.
+type Interface interface {
+	LookupMethod(name string) (Method, bool)
+}
+
+// A Method represents an exposed method on D-Bus.
+type Method interface {
+	// Call requires that all arguments are decoded before being passed to it.
+	Call(args ...interface{}) ([]interface{}, error)
+	NumArguments() int
+	NumReturns() int
+	// ArgumentValue returns a representative value for the argument at position.
+	// It should be of the proper type. reflect.Zero would be a good mechanism
+	// to use for this Value.
+	ArgumentValue(position int) interface{}
+	// ReturnValue returns a representative value for the return at position.
+	// It should be of the proper type. reflect.Zero would be a good mechanism
+	// to use for this Value.
+	ReturnValue(position int) interface{}
+}
+
+// An ArgumentDecoder can decode arguments using a non-standard mechanism.
+//
+// If a method implements this interface then the non-standard
+// decoder will be used.
+//
+// Method arguments must be decoded from the message.
+// The mechanism for doing this will vary based on the
+// implementation of the method. A normal approach is provided
+// as part of this library, but may be replaced with
+// any other decoding scheme.
+type ArgumentDecoder interface {
+	// To decode the arguments of a method, the sender and message are
+	// provided in case the semantics of the implementer provide access
+	// to these as part of the method invocation.
+	DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error)
+}
+
+// A SignalHandler is responsible for delivering a signal.
+//
+// Signal delivery may be changed from the default channel-based
+// approach by Handlers implementing the SignalHandler
+// interface.
+type SignalHandler interface {
+	DeliverSignal(iface, name string, signal *Signal)
+}
+
+// A DBusError is used to convert a generic object to a D-Bus error.
+//
+// Any custom error mechanism may implement this interface to provide
+// a custom encoding of the error on D-Bus. By default, if a normal
+// error is returned, it will be encoded as the generic
+// "org.freedesktop.DBus.Error.Failed" error. By implementing this
+// interface as well, a custom encoding may be provided.
+type DBusError interface {
+	DBusError() (string, []interface{})
+}
diff --git a/vendor/github.com/godbus/dbus/sig.go b/vendor/github.com/godbus/dbus/sig.go
index f45b53ce..c1b80920 100644
--- a/vendor/github.com/godbus/dbus/sig.go
+++ b/vendor/github.com/godbus/dbus/sig.go
@@ -57,12 +57,12 @@ func getSignature(t reflect.Type) string {
 		return "n"
 	case reflect.Uint16:
 		return "q"
-	case reflect.Int32:
+	case reflect.Int, reflect.Int32:
 		if t == unixFDType {
 			return "h"
 		}
 		return "i"
-	case reflect.Uint32:
+	case reflect.Uint, reflect.Uint32:
 		if t == unixFDIndexType {
 			return "h"
 		}
@@ -101,6 +101,8 @@ func getSignature(t reflect.Type) string {
 			panic(InvalidTypeError{t})
 		}
 		return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
+	case reflect.Interface:
+		return "v"
 	}
 	panic(InvalidTypeError{t})
 }
@@ -162,7 +164,7 @@ func (e SignatureError) Error() string {
 	return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
 }
 
-// Try to read a single type from this string. If it was successfull, err is nil
+// Try to read a single type from this string. If it was successful, err is nil
 // and rem is the remaining unparsed part. Otherwise, err is a non-nil
 // SignatureError and rem is "". depth is the current recursion depth which may
 // not be greater than 64 and should be given as 0 on the first call.
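The sig.go hunk above extends getSignature to handle reflect.Int, reflect.Uint, and reflect.Interface, so Go's native int/uint and interface{} now map to the D-Bus types "i", "u", and "v" instead of panicking with InvalidTypeError. A minimal sketch of the resulting behavior through the exported SignatureOf helper; this standalone program is illustrative only and is not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// Plain int/uint now share the 32-bit cases (note: a 64-bit Go int is
	// still encoded as the 32-bit D-Bus type "i"), and interface{} values
	// are encoded as variants ("v").
	fmt.Println(dbus.SignatureOf(42).String())      // "i"
	fmt.Println(dbus.SignatureOf(uint(7)).String()) // "u"
	fmt.Println(dbus.SignatureOf(map[string]interface{}{"k": 1}).String()) // "a{sv}"
}
```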
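The object.go hunk earlier in this patch changes the FlagNoReplyExpected path of (*Object).Go so that the returned Call always carries a Done channel that is closed after delivering the Call itself; previously Done was nil and receiving from it would block forever. A minimal sketch of a caller relying on that guarantee, assuming a session bus is available; the destination and method are the standard bus peer interface, but the program itself is illustrative and not part of the patch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		log.Fatal(err)
	}
	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")

	// With FlagNoReplyExpected the ch argument is ignored; after this patch
	// the returned Call's Done channel already contains the Call and is
	// closed, so this receive can never block.
	call := obj.Go("org.freedesktop.DBus.Peer.Ping", dbus.FlagNoReplyExpected, nil)
	if done := <-call.Done; done.Err != nil {
		log.Fatal(done.Err)
	}
	fmt.Println("fire-and-forget call queued without error")
}
```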
diff --git a/vendor/github.com/godbus/dbus/transport_generic.go b/vendor/github.com/godbus/dbus/transport_generic.go
index 46f8f49d..3fad859a 100644
--- a/vendor/github.com/godbus/dbus/transport_generic.go
+++ b/vendor/github.com/godbus/dbus/transport_generic.go
@@ -4,8 +4,23 @@ import (
 	"encoding/binary"
 	"errors"
 	"io"
+	"unsafe"
 )
 
+var nativeEndian binary.ByteOrder
+
+func detectEndianness() binary.ByteOrder {
+	var x uint32 = 0x01020304
+	if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
+		return binary.BigEndian
+	}
+	return binary.LittleEndian
+}
+
+func init() {
+	nativeEndian = detectEndianness()
+}
+
 type genericTransport struct {
 	io.ReadWriteCloser
 }
@@ -31,5 +46,5 @@ func (t genericTransport) SendMessage(msg *Message) error {
 			return errors.New("dbus: unix fd passing not enabled")
 		}
 	}
-	return msg.EncodeTo(t, binary.LittleEndian)
+	return msg.EncodeTo(t, nativeEndian)
 }
diff --git a/vendor/github.com/godbus/dbus/transport_unix.go b/vendor/github.com/godbus/dbus/transport_unix.go
index a1d00cbc..e56d5ca9 100644
--- a/vendor/github.com/godbus/dbus/transport_unix.go
+++ b/vendor/github.com/godbus/dbus/transport_unix.go
@@ -175,7 +175,7 @@ func (t *unixTransport) SendMessage(msg *Message) error {
 		msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
 		oob := syscall.UnixRights(fds...)
 		buf := new(bytes.Buffer)
-		msg.EncodeTo(buf, binary.LittleEndian)
+		msg.EncodeTo(buf, nativeEndian)
 		n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
 		if err != nil {
 			return err
@@ -184,7 +184,7 @@ func (t *unixTransport) SendMessage(msg *Message) error {
 			return io.ErrShortWrite
 		}
 	} else {
-		if err := msg.EncodeTo(t, binary.LittleEndian); err != nil {
+		if err := msg.EncodeTo(t, nativeEndian); err != nil {
 			return nil
 		}
 	}
diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go b/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go
new file mode 100644
index 00000000..0fc5b927
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go
@@ -0,0 +1,91 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+// Local implementation of the UnixCredentials system call for FreeBSD
+
+package dbus
+
+/*
+const int sizeofPtr = sizeof(void*);
+#define _WANT_UCRED
+#include <sys/ucred.h>
+*/
+import "C"
+
+import (
+	"io"
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
+// https://golang.org/src/syscall/ztypes_freebsd_amd64.go
+type Ucred struct {
+	Pid int32
+	Uid uint32
+	Gid uint32
+}
+
+// http://golang.org/src/pkg/syscall/types_linux.go
+// https://golang.org/src/syscall/types_freebsd.go
+// https://github.com/freebsd/freebsd/blob/master/sys/sys/ucred.h
+const (
+	SizeofUcred = C.sizeof_struct_ucred
+)
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgAlignOf(salen int) int {
+	salign := C.sizeofPtr
+
+	return (salen + salign - 1) & ^(salign - 1)
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// UnixCredentials encodes credentials into a socket control message
+// for sending to another process. This can be used for
+// authentication.
+func UnixCredentials(ucred *Ucred) []byte { + b := make([]byte, syscall.CmsgSpace(SizeofUcred)) + h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = syscall.SOL_SOCKET + h.Type = syscall.SCM_CREDS + h.SetLen(syscall.CmsgLen(SizeofUcred)) + *((*Ucred)(cmsgData(h))) = *ucred + return b +} + +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// ParseUnixCredentials decodes a socket control message that contains +// credentials in a Ucred structure. To receive such a message, the +// SO_PASSCRED option must be enabled on the socket. +func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) { + if m.Header.Level != syscall.SOL_SOCKET { + return nil, syscall.EINVAL + } + if m.Header.Type != syscall.SCM_CREDS { + return nil, syscall.EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +func (t *unixTransport) SendNullByte() error { + ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} + b := UnixCredentials(ucred) + _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) + if err != nil { + return err + } + if oobn != len(b) { + return io.ErrShortWrite + } + return nil +} diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go b/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go new file mode 100644 index 00000000..af7bafdf --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go @@ -0,0 +1,14 @@ +package dbus + +import "io" + +func (t *unixTransport) SendNullByte() error { + n, _, err := t.UnixConn.WriteMsgUnix([]byte{0}, nil, nil) + if err != nil { + return err + } + if n != 1 { + return io.ErrShortWrite + } + return nil +} diff --git a/vendor/github.com/godbus/dbus/variant.go b/vendor/github.com/godbus/dbus/variant.go index b7b13ae9..0ca123b0 100644 --- a/vendor/github.com/godbus/dbus/variant.go +++ b/vendor/github.com/godbus/dbus/variant.go @@ -17,7 +17,12 @@ type Variant struct { // MakeVariant converts the given value to a Variant. It panics if v cannot be // represented as a D-Bus type. func MakeVariant(v interface{}) Variant { - return Variant{SignatureOf(v), v} + return MakeVariantWithSignature(v, SignatureOf(v)) +} + +// MakeVariantWithSignature converts the given value to a Variant. +func MakeVariantWithSignature(v interface{}, s Signature) Variant { + return Variant{s, v} } // ParseVariant parses the given string as a variant as described at diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README index b4accc0c..0ad51363 100644 --- a/vendor/github.com/gogo/protobuf/README +++ b/vendor/github.com/gogo/protobuf/README @@ -207,6 +207,50 @@ the --go_out argument to protoc: protoc --gogo_out=plugins=grpc:. *.proto +## Compatibility ## + +The library and the generated code are expected to be stable over time. +However, we reserve the right to make breaking changes without notice for the +following reasons: + +- Security. A security issue in the specification or implementation may come to + light whose resolution requires breaking compatibility. We reserve the right + to address such security issues. +- Unspecified behavior. There are some aspects of the Protocol Buffers + specification that are undefined. Programs that depend on such unspecified + behavior may break in future releases. +- Specification errors or changes. 
If it becomes necessary to address an + inconsistency, incompleteness, or change in the Protocol Buffers + specification, resolving the issue could affect the meaning or legality of + existing programs. We reserve the right to address such issues, including + updating the implementations. +- Bugs. If the library has a bug that violates the specification, a program + that depends on the buggy behavior may break if the bug is fixed. We reserve + the right to fix such bugs. +- Adding methods or fields to generated structs. These may conflict with field + names that already exist in a schema, causing applications to break. When the + code generator encounters a field in the schema that would collide with a + generated field or method name, the code generator will append an underscore + to the generated field or method name. +- Adding, removing, or changing methods or fields in generated structs that + start with `XXX`. These parts of the generated code are exported out of + necessity, but should not be considered part of the public API. +- Adding, removing, or changing unexported symbols in generated code. + +Any breaking changes outside of these will be announced 6 months in advance to +protobuf@googlegroups.com. + +You should, whenever possible, use generated code created by the `protoc-gen-go` +tool built at the same commit as the `proto` package. The `proto` package +declares package-level constants in the form `ProtoPackageIsVersionX`. +Application code and generated code may depend on one of these constants to +ensure that compilation will fail if the available version of the proto library +is too old. Whenever we make a change to the generated code that requires newer +library support, in the same commit we will increment the version number of the +generated code and declare a new package-level constant whose name incorporates +the latest version number. Removing a compatibility constant is considered a +breaking change and would be subject to the announcement policy stated above. + ## Plugins ## The `protoc-gen-go/generator` package exposes a plugin interface, diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md index 00b346f2..e97bb1ba 100644 --- a/vendor/github.com/gogo/protobuf/Readme.md +++ b/vendor/github.com/gogo/protobuf/Readme.md @@ -5,7 +5,7 @@ gogoprotobuf is a fork of golang/protobuf with extra code generation features. 
This code generation is used to achieve: - + - fast marshalling and unmarshalling - more canonical Go structures - goprotobuf compatibility @@ -20,22 +20,25 @@ Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is d These projects use gogoprotobuf: - - etcd - blog + - etcd - blog - sample proto file - spacemonkey - blog - - bazil - - badoo - - mesos-go - - heka - - cockroachdb - - go-ipfs - - rkive-go + - badoo - sample proto file + - mesos-go - sample proto file + - heka - the switch from golang/protobuf to gogo/protobuf when it was still on code.google.com + - cockroachdb - sample proto file + - go-ipfs - sample proto file + - rkive-go - sample proto file - dropbox - - srclib - sample proto file + - srclib - sample proto file - adyoulike - - cloudfoundry - - kubernetes + - cloudfoundry - sample proto file + - kubernetes - go2idl built on top of gogoprotobuf - dgraph - release notes - benchmarks - centrifugo - release notes - blog + - docker swarmkit - sample proto file + - nats.io - go-nats-streaming + - tidb - Communication between tidb and tikv + - protoactor-go - vanity command that also generates actors from service definitions Please lets us know if you are using gogoprotobuf by posting on our GoogleGroup. @@ -45,21 +48,21 @@ Please lets us know if you are using gogoprotobuf by posting on our gophercon - alecthomas' go serialization benchmarks -## Getting Started +## Getting Started There are several ways to use gogoprotobuf, but for all you need to install go and protoc. After that you can choose: - + - Speed - More Speed and more generated code - Most Speed and most customization ### Installation -To install it, you must first have Go (at least version 1.3.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.4.2, 1.5.4, 1.6.3 and 1.7 are continuously tested. +To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.7.1 and 1.8 are continuously tested. Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf). -Most versions from 2.3.1 should not give any problems, but 2.5.0, 2.6.1 and 3 are continuously tested. +Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.2.0 are continuously tested. ### Speed @@ -106,11 +109,6 @@ Install protoc-gen-gogo: go get github.com/gogo/protobuf/protoc-gen-gogo go get github.com/gogo/protobuf/gogoproto -## Proto3 - -Proto3 is supported, but the new well known types are not supported yet. -[See Proto3 Issue](https://github.com/gogo/protobuf/issues/57) for more details. - ## GRPC It works the same as golang/protobuf, simply specify the plugin. diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go index 5ecfae11..147b5ecc 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/doc.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -148,6 +148,7 @@ The enumprefix, getters and stringer extensions can be used to remove some of th - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. 
This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). Less Typing and Peace of Mind is explained in their specific plugin folders godoc: diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go index 6da0e3e7..9506b6fb 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -34,6 +34,7 @@ var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ Field: 62001, Name: "gogoproto.goproto_enum_prefix", Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix", + Filename: "gogo.proto", } var E_GoprotoEnumStringer = &proto.ExtensionDesc{ @@ -42,6 +43,7 @@ var E_GoprotoEnumStringer = &proto.ExtensionDesc{ Field: 62021, Name: "gogoproto.goproto_enum_stringer", Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer", + Filename: "gogo.proto", } var E_EnumStringer = &proto.ExtensionDesc{ @@ -50,6 +52,7 @@ var E_EnumStringer = &proto.ExtensionDesc{ Field: 62022, Name: "gogoproto.enum_stringer", Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer", + Filename: "gogo.proto", } var E_EnumCustomname = &proto.ExtensionDesc{ @@ -58,6 +61,16 @@ var E_EnumCustomname = &proto.ExtensionDesc{ Field: 62023, Name: "gogoproto.enum_customname", Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", } var E_EnumvalueCustomname = &proto.ExtensionDesc{ @@ -66,6 +79,7 @@ var E_EnumvalueCustomname = &proto.ExtensionDesc{ Field: 66001, Name: "gogoproto.enumvalue_customname", Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname", + Filename: "gogo.proto", } var E_GoprotoGettersAll = &proto.ExtensionDesc{ @@ -74,6 +88,7 @@ var E_GoprotoGettersAll = &proto.ExtensionDesc{ Field: 63001, Name: "gogoproto.goproto_getters_all", Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll", + Filename: "gogo.proto", } var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ @@ -82,6 +97,7 @@ var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ Field: 63002, Name: "gogoproto.goproto_enum_prefix_all", Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll", + Filename: "gogo.proto", } var E_GoprotoStringerAll = &proto.ExtensionDesc{ @@ -90,6 +106,7 @@ var E_GoprotoStringerAll = &proto.ExtensionDesc{ Field: 63003, Name: "gogoproto.goproto_stringer_all", Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll", + Filename: "gogo.proto", } var E_VerboseEqualAll = &proto.ExtensionDesc{ @@ -98,6 +115,7 @@ var E_VerboseEqualAll = &proto.ExtensionDesc{ Field: 63004, Name: "gogoproto.verbose_equal_all", Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll", + Filename: "gogo.proto", } var E_FaceAll = &proto.ExtensionDesc{ @@ -106,6 +124,7 @@ var E_FaceAll = &proto.ExtensionDesc{ Field: 63005, Name: "gogoproto.face_all", 
Tag: "varint,63005,opt,name=face_all,json=faceAll", + Filename: "gogo.proto", } var E_GostringAll = &proto.ExtensionDesc{ @@ -114,6 +133,7 @@ var E_GostringAll = &proto.ExtensionDesc{ Field: 63006, Name: "gogoproto.gostring_all", Tag: "varint,63006,opt,name=gostring_all,json=gostringAll", + Filename: "gogo.proto", } var E_PopulateAll = &proto.ExtensionDesc{ @@ -122,6 +142,7 @@ var E_PopulateAll = &proto.ExtensionDesc{ Field: 63007, Name: "gogoproto.populate_all", Tag: "varint,63007,opt,name=populate_all,json=populateAll", + Filename: "gogo.proto", } var E_StringerAll = &proto.ExtensionDesc{ @@ -130,6 +151,7 @@ var E_StringerAll = &proto.ExtensionDesc{ Field: 63008, Name: "gogoproto.stringer_all", Tag: "varint,63008,opt,name=stringer_all,json=stringerAll", + Filename: "gogo.proto", } var E_OnlyoneAll = &proto.ExtensionDesc{ @@ -138,6 +160,7 @@ var E_OnlyoneAll = &proto.ExtensionDesc{ Field: 63009, Name: "gogoproto.onlyone_all", Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll", + Filename: "gogo.proto", } var E_EqualAll = &proto.ExtensionDesc{ @@ -146,6 +169,7 @@ var E_EqualAll = &proto.ExtensionDesc{ Field: 63013, Name: "gogoproto.equal_all", Tag: "varint,63013,opt,name=equal_all,json=equalAll", + Filename: "gogo.proto", } var E_DescriptionAll = &proto.ExtensionDesc{ @@ -154,6 +178,7 @@ var E_DescriptionAll = &proto.ExtensionDesc{ Field: 63014, Name: "gogoproto.description_all", Tag: "varint,63014,opt,name=description_all,json=descriptionAll", + Filename: "gogo.proto", } var E_TestgenAll = &proto.ExtensionDesc{ @@ -162,6 +187,7 @@ var E_TestgenAll = &proto.ExtensionDesc{ Field: 63015, Name: "gogoproto.testgen_all", Tag: "varint,63015,opt,name=testgen_all,json=testgenAll", + Filename: "gogo.proto", } var E_BenchgenAll = &proto.ExtensionDesc{ @@ -170,6 +196,7 @@ var E_BenchgenAll = &proto.ExtensionDesc{ Field: 63016, Name: "gogoproto.benchgen_all", Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll", + Filename: "gogo.proto", } var E_MarshalerAll = &proto.ExtensionDesc{ @@ -178,6 +205,7 @@ var E_MarshalerAll = &proto.ExtensionDesc{ Field: 63017, Name: "gogoproto.marshaler_all", Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll", + Filename: "gogo.proto", } var E_UnmarshalerAll = &proto.ExtensionDesc{ @@ -186,6 +214,7 @@ var E_UnmarshalerAll = &proto.ExtensionDesc{ Field: 63018, Name: "gogoproto.unmarshaler_all", Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll", + Filename: "gogo.proto", } var E_StableMarshalerAll = &proto.ExtensionDesc{ @@ -194,6 +223,7 @@ var E_StableMarshalerAll = &proto.ExtensionDesc{ Field: 63019, Name: "gogoproto.stable_marshaler_all", Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll", + Filename: "gogo.proto", } var E_SizerAll = &proto.ExtensionDesc{ @@ -202,6 +232,7 @@ var E_SizerAll = &proto.ExtensionDesc{ Field: 63020, Name: "gogoproto.sizer_all", Tag: "varint,63020,opt,name=sizer_all,json=sizerAll", + Filename: "gogo.proto", } var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ @@ -210,6 +241,7 @@ var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ Field: 63021, Name: "gogoproto.goproto_enum_stringer_all", Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll", + Filename: "gogo.proto", } var E_EnumStringerAll = &proto.ExtensionDesc{ @@ -218,6 +250,7 @@ var E_EnumStringerAll = &proto.ExtensionDesc{ Field: 63022, Name: "gogoproto.enum_stringer_all", Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll", + Filename: "gogo.proto", } var E_UnsafeMarshalerAll = 
&proto.ExtensionDesc{ @@ -226,6 +259,7 @@ var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ Field: 63023, Name: "gogoproto.unsafe_marshaler_all", Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll", + Filename: "gogo.proto", } var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ @@ -234,6 +268,7 @@ var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ Field: 63024, Name: "gogoproto.unsafe_unmarshaler_all", Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll", + Filename: "gogo.proto", } var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ @@ -242,6 +277,7 @@ var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ Field: 63025, Name: "gogoproto.goproto_extensions_map_all", Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll", + Filename: "gogo.proto", } var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ @@ -250,6 +286,7 @@ var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ Field: 63026, Name: "gogoproto.goproto_unrecognized_all", Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll", + Filename: "gogo.proto", } var E_GogoprotoImport = &proto.ExtensionDesc{ @@ -258,6 +295,7 @@ var E_GogoprotoImport = &proto.ExtensionDesc{ Field: 63027, Name: "gogoproto.gogoproto_import", Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport", + Filename: "gogo.proto", } var E_ProtosizerAll = &proto.ExtensionDesc{ @@ -266,6 +304,7 @@ var E_ProtosizerAll = &proto.ExtensionDesc{ Field: 63028, Name: "gogoproto.protosizer_all", Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll", + Filename: "gogo.proto", } var E_CompareAll = &proto.ExtensionDesc{ @@ -274,6 +313,34 @@ var E_CompareAll = &proto.ExtensionDesc{ Field: 63029, Name: "gogoproto.compare_all", Tag: "varint,63029,opt,name=compare_all,json=compareAll", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all,json=typedeclAll", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all,json=enumdeclAll", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration,json=goprotoRegistration", + Filename: "gogo.proto", } var E_GoprotoGetters = &proto.ExtensionDesc{ @@ -282,6 +349,7 @@ var E_GoprotoGetters = &proto.ExtensionDesc{ Field: 64001, Name: "gogoproto.goproto_getters", Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters", + Filename: "gogo.proto", } var E_GoprotoStringer = &proto.ExtensionDesc{ @@ -290,6 +358,7 @@ var E_GoprotoStringer = &proto.ExtensionDesc{ Field: 64003, Name: "gogoproto.goproto_stringer", Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer", + Filename: "gogo.proto", } var E_VerboseEqual = &proto.ExtensionDesc{ @@ -298,6 +367,7 @@ var E_VerboseEqual = &proto.ExtensionDesc{ Field: 64004, Name: "gogoproto.verbose_equal", Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual", + Filename: "gogo.proto", } var E_Face = &proto.ExtensionDesc{ @@ -306,6 +376,7 @@ var E_Face = 
&proto.ExtensionDesc{ Field: 64005, Name: "gogoproto.face", Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", } var E_Gostring = &proto.ExtensionDesc{ @@ -314,6 +385,7 @@ var E_Gostring = &proto.ExtensionDesc{ Field: 64006, Name: "gogoproto.gostring", Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", } var E_Populate = &proto.ExtensionDesc{ @@ -322,6 +394,7 @@ var E_Populate = &proto.ExtensionDesc{ Field: 64007, Name: "gogoproto.populate", Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", } var E_Stringer = &proto.ExtensionDesc{ @@ -330,6 +403,7 @@ var E_Stringer = &proto.ExtensionDesc{ Field: 67008, Name: "gogoproto.stringer", Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", } var E_Onlyone = &proto.ExtensionDesc{ @@ -338,6 +412,7 @@ var E_Onlyone = &proto.ExtensionDesc{ Field: 64009, Name: "gogoproto.onlyone", Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", } var E_Equal = &proto.ExtensionDesc{ @@ -346,6 +421,7 @@ var E_Equal = &proto.ExtensionDesc{ Field: 64013, Name: "gogoproto.equal", Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", } var E_Description = &proto.ExtensionDesc{ @@ -354,6 +430,7 @@ var E_Description = &proto.ExtensionDesc{ Field: 64014, Name: "gogoproto.description", Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", } var E_Testgen = &proto.ExtensionDesc{ @@ -362,6 +439,7 @@ var E_Testgen = &proto.ExtensionDesc{ Field: 64015, Name: "gogoproto.testgen", Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", } var E_Benchgen = &proto.ExtensionDesc{ @@ -370,6 +448,7 @@ var E_Benchgen = &proto.ExtensionDesc{ Field: 64016, Name: "gogoproto.benchgen", Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", } var E_Marshaler = &proto.ExtensionDesc{ @@ -378,6 +457,7 @@ var E_Marshaler = &proto.ExtensionDesc{ Field: 64017, Name: "gogoproto.marshaler", Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", } var E_Unmarshaler = &proto.ExtensionDesc{ @@ -386,6 +466,7 @@ var E_Unmarshaler = &proto.ExtensionDesc{ Field: 64018, Name: "gogoproto.unmarshaler", Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", } var E_StableMarshaler = &proto.ExtensionDesc{ @@ -394,6 +475,7 @@ var E_StableMarshaler = &proto.ExtensionDesc{ Field: 64019, Name: "gogoproto.stable_marshaler", Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler", + Filename: "gogo.proto", } var E_Sizer = &proto.ExtensionDesc{ @@ -402,6 +484,7 @@ var E_Sizer = &proto.ExtensionDesc{ Field: 64020, Name: "gogoproto.sizer", Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", } var E_UnsafeMarshaler = &proto.ExtensionDesc{ @@ -410,6 +493,7 @@ var E_UnsafeMarshaler = &proto.ExtensionDesc{ Field: 64023, Name: "gogoproto.unsafe_marshaler", Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler", + Filename: "gogo.proto", } var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ @@ -418,6 +502,7 @@ var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ Field: 64024, Name: "gogoproto.unsafe_unmarshaler", Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler", + Filename: "gogo.proto", } var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ @@ -426,6 +511,7 @@ var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ Field: 64025, Name: "gogoproto.goproto_extensions_map", Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap", + Filename: "gogo.proto", } var E_GoprotoUnrecognized = &proto.ExtensionDesc{ @@ -434,6 +520,7 @@ var 
E_GoprotoUnrecognized = &proto.ExtensionDesc{ Field: 64026, Name: "gogoproto.goproto_unrecognized", Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized", + Filename: "gogo.proto", } var E_Protosizer = &proto.ExtensionDesc{ @@ -442,6 +529,7 @@ var E_Protosizer = &proto.ExtensionDesc{ Field: 64028, Name: "gogoproto.protosizer", Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", } var E_Compare = &proto.ExtensionDesc{ @@ -450,6 +538,16 @@ var E_Compare = &proto.ExtensionDesc{ Field: 64029, Name: "gogoproto.compare", Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", } var E_Nullable = &proto.ExtensionDesc{ @@ -458,6 +556,7 @@ var E_Nullable = &proto.ExtensionDesc{ Field: 65001, Name: "gogoproto.nullable", Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", } var E_Embed = &proto.ExtensionDesc{ @@ -466,6 +565,7 @@ var E_Embed = &proto.ExtensionDesc{ Field: 65002, Name: "gogoproto.embed", Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", } var E_Customtype = &proto.ExtensionDesc{ @@ -474,6 +574,7 @@ var E_Customtype = &proto.ExtensionDesc{ Field: 65003, Name: "gogoproto.customtype", Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", } var E_Customname = &proto.ExtensionDesc{ @@ -482,6 +583,7 @@ var E_Customname = &proto.ExtensionDesc{ Field: 65004, Name: "gogoproto.customname", Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", } var E_Jsontag = &proto.ExtensionDesc{ @@ -490,6 +592,7 @@ var E_Jsontag = &proto.ExtensionDesc{ Field: 65005, Name: "gogoproto.jsontag", Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", } var E_Moretags = &proto.ExtensionDesc{ @@ -498,6 +601,7 @@ var E_Moretags = &proto.ExtensionDesc{ Field: 65006, Name: "gogoproto.moretags", Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", } var E_Casttype = &proto.ExtensionDesc{ @@ -506,6 +610,7 @@ var E_Casttype = &proto.ExtensionDesc{ Field: 65007, Name: "gogoproto.casttype", Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", } var E_Castkey = &proto.ExtensionDesc{ @@ -514,6 +619,7 @@ var E_Castkey = &proto.ExtensionDesc{ Field: 65008, Name: "gogoproto.castkey", Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", } var E_Castvalue = &proto.ExtensionDesc{ @@ -522,6 +628,25 @@ var E_Castvalue = &proto.ExtensionDesc{ Field: 65009, Name: "gogoproto.castvalue", Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", } func init() { @@ -529,6 +654,7 @@ func init() { proto.RegisterExtension(E_GoprotoEnumStringer) proto.RegisterExtension(E_EnumStringer) proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) proto.RegisterExtension(E_EnumvalueCustomname) proto.RegisterExtension(E_GoprotoGettersAll) 
proto.RegisterExtension(E_GoprotoEnumPrefixAll) @@ -556,6 +682,9 @@ func init() { proto.RegisterExtension(E_GogoprotoImport) proto.RegisterExtension(E_ProtosizerAll) proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) proto.RegisterExtension(E_GoprotoGetters) proto.RegisterExtension(E_GoprotoStringer) proto.RegisterExtension(E_VerboseEqual) @@ -578,6 +707,7 @@ func init() { proto.RegisterExtension(E_GoprotoUnrecognized) proto.RegisterExtension(E_Protosizer) proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) proto.RegisterExtension(E_Nullable) proto.RegisterExtension(E_Embed) proto.RegisterExtension(E_Customtype) @@ -587,79 +717,88 @@ func init() { proto.RegisterExtension(E_Casttype) proto.RegisterExtension(E_Castkey) proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) } func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } var fileDescriptorGogo = []byte{ - // 1098 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xc9, 0x6f, 0x1c, 0x45, - 0x14, 0x87, 0x85, 0x70, 0xe4, 0x99, 0xe7, 0x0d, 0x8f, 0x8d, 0x09, 0x11, 0x88, 0xe4, 0xc6, 0xc9, - 0x39, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0xa3, 0x20, 0x0c, 0x23, 0x13, 0x07, 0x10, 0x87, - 0x51, 0xcf, 0xb8, 0xdc, 0x19, 0xe8, 0xee, 0x6a, 0xba, 0xba, 0xa3, 0x38, 0x37, 0x14, 0x16, 0x21, - 0xc4, 0x8e, 0x04, 0x09, 0x09, 0xcb, 0x81, 0x7d, 0x0d, 0xcb, 0x9d, 0x0b, 0x70, 0xe6, 0x7f, 0xe0, - 0x02, 0x98, 0x4d, 0xf2, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x1e, 0x8f, 0x54, 0x35, 0xb7, - 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xbf, 0x69, 0x00, 0x5f, 0xf9, 0x6a, 0x31, 0x4e, - 0x54, 0xaa, 0x1a, 0x75, 0xbc, 0xce, 0x2f, 0x8f, 0x1c, 0xf5, 0x95, 0xf2, 0x03, 0x79, 0x3c, 0xff, - 0xab, 0x93, 0x6d, 0x1f, 0xdf, 0x92, 0xba, 0x9b, 0xf4, 0xe2, 0x54, 0x25, 0xc5, 0x62, 0xf1, 0x20, - 0xcc, 0xd1, 0xe2, 0xb6, 0x8c, 0xb2, 0xb0, 0x1d, 0x27, 0x72, 0xbb, 0x77, 0xa9, 0x71, 0xd7, 0x62, - 0x41, 0x2e, 0x32, 0xb9, 0xb8, 0x16, 0x65, 0xe1, 0x43, 0x71, 0xda, 0x53, 0x91, 0x3e, 0x7c, 0xf3, - 0xb7, 0x5b, 0x8f, 0xde, 0x72, 0x6f, 0x6d, 0x63, 0x96, 0x50, 0xfc, 0x5f, 0x2b, 0x07, 0xc5, 0x06, - 0xdc, 0x5e, 0xf1, 0xe9, 0x34, 0xe9, 0x45, 0xbe, 0x4c, 0x2c, 0xc6, 0x9f, 0xc8, 0x38, 0x67, 0x18, - 0x1f, 0x26, 0x54, 0xac, 0xc2, 0xd4, 0x28, 0xae, 0x9f, 0xc9, 0x35, 0x29, 0x4d, 0x49, 0x13, 0x66, - 0x72, 0x49, 0x37, 0xd3, 0xa9, 0x0a, 0x23, 0x2f, 0x94, 0x16, 0xcd, 0x2f, 0xb9, 0xa6, 0xbe, 0x31, - 0x8d, 0xd8, 0x6a, 0x49, 0x89, 0xf3, 0x30, 0x8f, 0x9f, 0x5c, 0xf4, 0x82, 0x4c, 0x9a, 0xb6, 0x63, - 0x43, 0x6d, 0xe7, 0x71, 0x19, 0x2b, 0x7f, 0xbd, 0x32, 0x96, 0x2b, 0xe7, 0x4a, 0x81, 0xe1, 0x35, - 0x3a, 0xe1, 0xcb, 0x34, 0x95, 0x89, 0x6e, 0x7b, 0x41, 0x30, 0x64, 0x93, 0x67, 0x7a, 0x41, 0x69, - 0xbc, 0xba, 0x5b, 0xed, 0x44, 0xb3, 0x20, 0x57, 0x82, 0x40, 0x6c, 0xc2, 0x1d, 0x43, 0x3a, 0xeb, - 0xe0, 0xbc, 0x46, 0xce, 0xf9, 0x03, 0xdd, 0x45, 0x6d, 0x0b, 0xf8, 0xf3, 0xb2, 0x1f, 0x0e, 0xce, - 0x77, 0xc8, 0xd9, 0x20, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x61, 0xf6, 0xa2, 0x4c, 0x3a, 0x4a, 0xcb, - 0xb6, 0x7c, 0x2a, 0xf3, 0x02, 0x07, 0xdd, 0x75, 0xd2, 0xcd, 0x10, 0xb8, 0x86, 0x1c, 0xba, 0x4e, - 0x42, 0x6d, 0xdb, 0xeb, 0x4a, 0x07, 0xc5, 0x0d, 0x52, 0x8c, 0xe3, 0x7a, 0x44, 0x57, 0x60, 0xd2, - 0x57, 0xc5, 0x2d, 0x39, 0xe0, 0xef, 0x12, 0x3e, 0xc1, 0x0c, 0x29, 0x62, 0x15, 0x67, 0x81, 0x97, - 0xba, 0xec, 0xe0, 0x3d, 0x56, 0x30, 0x43, 0x8a, 0x11, 0xca, 0xfa, 
0x3e, 0x2b, 0xb4, 0x51, 0xcf, - 0x65, 0x98, 0x50, 0x51, 0xb0, 0xa3, 0x22, 0x97, 0x4d, 0x7c, 0x40, 0x06, 0x20, 0x04, 0x05, 0x4b, - 0x50, 0x77, 0x6d, 0xc4, 0x87, 0x84, 0xd7, 0x24, 0x77, 0xa0, 0x09, 0x33, 0x3c, 0x64, 0x7a, 0x2a, - 0x72, 0x50, 0x7c, 0x44, 0x8a, 0x69, 0x03, 0xa3, 0xdb, 0x48, 0xa5, 0x4e, 0x7d, 0xe9, 0x22, 0xf9, - 0x98, 0x6f, 0x83, 0x10, 0x2a, 0x65, 0x47, 0x46, 0xdd, 0x0b, 0x6e, 0x86, 0x4f, 0xb8, 0x94, 0xcc, - 0xa0, 0x62, 0x15, 0xa6, 0x42, 0x2f, 0xd1, 0x17, 0xbc, 0xc0, 0xa9, 0x1d, 0x9f, 0x92, 0x63, 0xb2, - 0x84, 0xa8, 0x22, 0x59, 0x34, 0x8a, 0xe6, 0x33, 0xae, 0x88, 0x81, 0xd1, 0xd1, 0xd3, 0xa9, 0xd7, - 0x09, 0x64, 0x7b, 0x14, 0xdb, 0xe7, 0x7c, 0xf4, 0x0a, 0x76, 0xdd, 0x34, 0x2e, 0x41, 0x5d, 0xf7, - 0x2e, 0x3b, 0x69, 0xbe, 0xe0, 0x4e, 0xe7, 0x00, 0xc2, 0x8f, 0xc1, 0x9d, 0x43, 0x47, 0xbd, 0x83, - 0xec, 0x4b, 0x92, 0x2d, 0x0c, 0x19, 0xf7, 0x34, 0x12, 0x46, 0x55, 0x7e, 0xc5, 0x23, 0x41, 0x0e, - 0xb8, 0x5a, 0x30, 0x9f, 0x45, 0xda, 0xdb, 0x1e, 0xad, 0x6a, 0x5f, 0x73, 0xd5, 0x0a, 0xb6, 0x52, - 0xb5, 0x73, 0xb0, 0x40, 0xc6, 0xd1, 0xfa, 0xfa, 0x0d, 0x0f, 0xd6, 0x82, 0xde, 0xac, 0x76, 0xf7, - 0x71, 0x38, 0x52, 0x96, 0xf3, 0x52, 0x2a, 0x23, 0x8d, 0x4c, 0x3b, 0xf4, 0x62, 0x07, 0xf3, 0x4d, - 0x32, 0xf3, 0xc4, 0x5f, 0x2b, 0x05, 0xeb, 0x5e, 0x8c, 0xf2, 0x47, 0xe1, 0x30, 0xcb, 0xb3, 0x28, - 0x91, 0x5d, 0xe5, 0x47, 0xbd, 0xcb, 0x72, 0xcb, 0x41, 0xfd, 0xed, 0x40, 0xab, 0x36, 0x0d, 0x1c, - 0xcd, 0x67, 0xe1, 0xb6, 0xf2, 0xf7, 0x46, 0xbb, 0x17, 0xc6, 0x2a, 0x49, 0x2d, 0xc6, 0xef, 0xb8, - 0x53, 0x25, 0x77, 0x36, 0xc7, 0xc4, 0x1a, 0x4c, 0xe7, 0x7f, 0xba, 0x3e, 0x92, 0xdf, 0x93, 0x68, - 0xaa, 0x4f, 0xd1, 0xe0, 0xe8, 0xaa, 0x30, 0xf6, 0x12, 0x97, 0xf9, 0xf7, 0x03, 0x0f, 0x0e, 0x42, - 0x8a, 0xa7, 0x6f, 0x66, 0x20, 0x89, 0x1b, 0xf7, 0x1c, 0x90, 0xac, 0x4b, 0xad, 0x3d, 0xbf, 0xf4, - 0x3c, 0xbd, 0x47, 0x67, 0xb6, 0x1a, 0xc4, 0xe2, 0x01, 0x2c, 0x4f, 0x35, 0x2e, 0xed, 0xb2, 0x2b, - 0x7b, 0x65, 0x85, 0x2a, 0x69, 0x29, 0xce, 0xc0, 0x54, 0x25, 0x2a, 0xed, 0xaa, 0x67, 0x48, 0x35, - 0x69, 0x26, 0xa5, 0x38, 0x01, 0x63, 0x18, 0x7b, 0x76, 0xfc, 0x59, 0xc2, 0xf3, 0xe5, 0xe2, 0x14, - 0xd4, 0x38, 0xee, 0xec, 0xe8, 0x73, 0x84, 0x96, 0x08, 0xe2, 0x1c, 0x75, 0x76, 0xfc, 0x79, 0xc6, - 0x19, 0x41, 0xdc, 0xbd, 0x84, 0x3f, 0xbe, 0x38, 0x46, 0xe3, 0x8a, 0x6b, 0xb7, 0x04, 0xe3, 0x94, - 0x71, 0x76, 0xfa, 0x05, 0xfa, 0x72, 0x26, 0xc4, 0x7d, 0x70, 0xc8, 0xb1, 0xe0, 0x2f, 0x11, 0x5a, - 0xac, 0x17, 0xab, 0x30, 0x61, 0xe4, 0x9a, 0x1d, 0x7f, 0x99, 0x70, 0x93, 0xc2, 0xad, 0x53, 0xae, - 0xd9, 0x05, 0xaf, 0xf0, 0xd6, 0x89, 0xc0, 0xb2, 0x71, 0xa4, 0xd9, 0xe9, 0x57, 0xb9, 0xea, 0x8c, - 0x88, 0x65, 0xa8, 0x97, 0x63, 0xca, 0xce, 0xbf, 0x46, 0x7c, 0x9f, 0xc1, 0x0a, 0x18, 0x63, 0xd2, - 0xae, 0x78, 0x9d, 0x2b, 0x60, 0x50, 0x78, 0x8c, 0x06, 0xa3, 0xcf, 0x6e, 0x7a, 0x83, 0x8f, 0xd1, - 0x40, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc9, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0x06, - 0xb3, 0xc4, 0xee, 0x78, 0x8b, 0xb7, 0x31, 0x10, 0x25, 0xa2, 0x05, 0x8d, 0x83, 0x39, 0x62, 0xf7, - 0xbd, 0x4d, 0xbe, 0xd9, 0x03, 0x31, 0x22, 0x1e, 0x81, 0x85, 0xe1, 0x19, 0x62, 0xb7, 0x5e, 0xdd, - 0x1b, 0xf8, 0xd5, 0x6f, 0x46, 0x88, 0x38, 0xd7, 0xff, 0xd5, 0x6f, 0xe6, 0x87, 0x5d, 0x7b, 0x6d, - 0xaf, 0xfa, 0x62, 0x67, 0xc6, 0x87, 0x58, 0x01, 0xe8, 0x8f, 0x6e, 0xbb, 0xeb, 0x3a, 0xb9, 0x0c, - 0x08, 0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x83, 0x8f, 0x06, 0x11, 0x62, 0x09, 0x6a, 0x51, 0x16, - 0x04, 0xf8, 0x70, 0x34, 0xee, 0x1e, 0x12, 0x13, 0x32, 0xd8, 0x62, 0xf6, 0xf7, 0x7d, 0x3a, 0x18, - 0x0c, 0x88, 0x13, 0x70, 0x48, 0x86, 0x1d, 0xb9, 0x65, 0x23, 0xff, 0xd8, 0xe7, 0x81, 0x80, 
0xab, - 0xc5, 0x32, 0x40, 0xf1, 0xd2, 0x98, 0xee, 0xc4, 0xd6, 0x6f, 0xfd, 0x73, 0xbf, 0x78, 0x07, 0x35, - 0x90, 0xbe, 0x20, 0x7f, 0xeb, 0xb4, 0x08, 0x76, 0xab, 0x82, 0xfc, 0x45, 0xf3, 0x24, 0x8c, 0x3f, - 0xa1, 0x55, 0x94, 0x7a, 0xbe, 0x8d, 0xfe, 0x8b, 0x68, 0x5e, 0x8f, 0x05, 0x0b, 0x55, 0x22, 0x53, - 0xcf, 0xd7, 0x36, 0xf6, 0x6f, 0x62, 0x4b, 0x00, 0xe1, 0xae, 0xa7, 0x53, 0x97, 0xfb, 0xfe, 0x87, - 0x61, 0x06, 0x70, 0xd3, 0x78, 0xfd, 0xa4, 0xdc, 0xb1, 0xb1, 0xff, 0xf2, 0xa6, 0x69, 0xbd, 0x38, - 0x05, 0x75, 0xbc, 0xcc, 0xdf, 0xb7, 0x6d, 0xf0, 0x7f, 0x04, 0xf7, 0x89, 0xd3, 0xc7, 0x60, 0xae, - 0xab, 0xc2, 0x41, 0xec, 0x34, 0x34, 0x55, 0x53, 0xb5, 0xf2, 0x07, 0xf1, 0xff, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00, + // 1201 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0xcb, 0x6f, 0x1c, 0x45, + 0x13, 0xc0, 0xf5, 0xe9, 0x73, 0x64, 0x6f, 0xf9, 0x85, 0xd7, 0xc6, 0x84, 0x08, 0x44, 0x72, 0xe3, + 0xe4, 0x9c, 0x22, 0x94, 0xb6, 0x22, 0xcb, 0xb1, 0x1c, 0x2b, 0x11, 0x06, 0x63, 0xe2, 0x00, 0xe2, + 0xb0, 0x9a, 0xdd, 0x6d, 0x4f, 0x06, 0x66, 0xa6, 0x87, 0x99, 0x9e, 0x28, 0xce, 0x0d, 0x85, 0x87, + 0x10, 0xe2, 0x8d, 0x04, 0x09, 0x49, 0x80, 0x03, 0xef, 0x67, 0x78, 0x1f, 0xb9, 0xf0, 0xb8, 0xf2, + 0x3f, 0x70, 0x01, 0xcc, 0xdb, 0x37, 0x5f, 0x50, 0xcd, 0x56, 0xcd, 0xf6, 0xac, 0x57, 0xea, 0xde, + 0xdb, 0xec, 0xba, 0x7f, 0xbf, 0xad, 0xa9, 0x9a, 0xae, 0xea, 0x31, 0x80, 0xaf, 0x7c, 0x35, 0x97, + 0xa4, 0x4a, 0xab, 0x7a, 0x0d, 0xaf, 0x8b, 0xcb, 0x03, 0x07, 0x7d, 0xa5, 0xfc, 0x50, 0x1e, 0x2e, + 0x3e, 0x35, 0xf3, 0xcd, 0xc3, 0x6d, 0x99, 0xb5, 0xd2, 0x20, 0xd1, 0x2a, 0xed, 0x2c, 0x16, 0x77, + 0xc1, 0x34, 0x2d, 0x6e, 0xc8, 0x38, 0x8f, 0x1a, 0x49, 0x2a, 0x37, 0x83, 0xf3, 0xf5, 0x5b, 0xe6, + 0x3a, 0xe4, 0x1c, 0x93, 0x73, 0xcb, 0x71, 0x1e, 0xdd, 0x9d, 0xe8, 0x40, 0xc5, 0xd9, 0xfe, 0xeb, + 0x3f, 0xff, 0xff, 0xe0, 0xff, 0x6e, 0x1f, 0x59, 0x9f, 0x22, 0x14, 0xff, 0xb6, 0x56, 0x80, 0x62, + 0x1d, 0x6e, 0xac, 0xf8, 0x32, 0x9d, 0x06, 0xb1, 0x2f, 0x53, 0x8b, 0xf1, 0x3b, 0x32, 0x4e, 0x1b, + 0xc6, 0x7b, 0x09, 0x15, 0x4b, 0x30, 0x3e, 0x88, 0xeb, 0x7b, 0x72, 0x8d, 0x49, 0x53, 0xb2, 0x02, + 0x93, 0x85, 0xa4, 0x95, 0x67, 0x5a, 0x45, 0xb1, 0x17, 0x49, 0x8b, 0xe6, 0x87, 0x42, 0x53, 0x5b, + 0x9f, 0x40, 0x6c, 0xa9, 0xa4, 0x84, 0x80, 0x11, 0xfc, 0xa6, 0x2d, 0x5b, 0xa1, 0xc5, 0xf0, 0x23, + 0x05, 0x52, 0xae, 0x17, 0x67, 0x60, 0x06, 0xaf, 0xcf, 0x79, 0x61, 0x2e, 0xcd, 0x48, 0x0e, 0xf5, + 0xf5, 0x9c, 0xc1, 0x65, 0x2c, 0xfb, 0xe9, 0xe2, 0x50, 0x11, 0xce, 0x74, 0x29, 0x30, 0x62, 0x32, + 0xaa, 0xe8, 0x4b, 0xad, 0x65, 0x9a, 0x35, 0xbc, 0xb0, 0x5f, 0x78, 0x27, 0x82, 0xb0, 0x34, 0x5e, + 0xda, 0xae, 0x56, 0x71, 0xa5, 0x43, 0x2e, 0x86, 0xa1, 0xd8, 0x80, 0x9b, 0xfa, 0x3c, 0x15, 0x0e, + 0xce, 0xcb, 0xe4, 0x9c, 0xd9, 0xf3, 0x64, 0xa0, 0x76, 0x0d, 0xf8, 0xfb, 0xb2, 0x96, 0x0e, 0xce, + 0xd7, 0xc8, 0x59, 0x27, 0x96, 0x4b, 0x8a, 0xc6, 0x53, 0x30, 0x75, 0x4e, 0xa6, 0x4d, 0x95, 0xc9, + 0x86, 0x7c, 0x24, 0xf7, 0x42, 0x07, 0xdd, 0x15, 0xd2, 0x4d, 0x12, 0xb8, 0x8c, 0x1c, 0xba, 0x8e, + 0xc2, 0xc8, 0xa6, 0xd7, 0x92, 0x0e, 0x8a, 0xab, 0xa4, 0x18, 0xc6, 0xf5, 0x88, 0x2e, 0xc2, 0x98, + 0xaf, 0x3a, 0xb7, 0xe4, 0x80, 0x5f, 0x23, 0x7c, 0x94, 0x19, 0x52, 0x24, 0x2a, 0xc9, 0x43, 0x4f, + 0xbb, 0x44, 0xf0, 0x3a, 0x2b, 0x98, 0x21, 0xc5, 0x00, 0x69, 0x7d, 0x83, 0x15, 0x99, 0x91, 0xcf, + 0x05, 0x18, 0x55, 0x71, 0xb8, 0xa5, 0x62, 0x97, 0x20, 0xde, 0x24, 0x03, 0x10, 0x82, 0x82, 0x79, + 0xa8, 0xb9, 0x16, 0xe2, 0xad, 0x6d, 0xde, 0x1e, 0x5c, 0x81, 0x15, 0x98, 0xe4, 0x06, 0x15, 0xa8, + 0xd8, 
0x41, 0xf1, 0x36, 0x29, 0x26, 0x0c, 0x8c, 0x6e, 0x43, 0xcb, 0x4c, 0xfb, 0xd2, 0x45, 0xf2, + 0x0e, 0xdf, 0x06, 0x21, 0x94, 0xca, 0xa6, 0x8c, 0x5b, 0x67, 0xdd, 0x0c, 0xef, 0x72, 0x2a, 0x99, + 0x41, 0xc5, 0x12, 0x8c, 0x47, 0x5e, 0x9a, 0x9d, 0xf5, 0x42, 0xa7, 0x72, 0xbc, 0x47, 0x8e, 0xb1, + 0x12, 0xa2, 0x8c, 0xe4, 0xf1, 0x20, 0x9a, 0xf7, 0x39, 0x23, 0x06, 0x46, 0x5b, 0x2f, 0xd3, 0x5e, + 0x33, 0x94, 0x8d, 0x41, 0x6c, 0x1f, 0xf0, 0xd6, 0xeb, 0xb0, 0xab, 0xa6, 0x71, 0x1e, 0x6a, 0x59, + 0x70, 0xc1, 0x49, 0xf3, 0x21, 0x57, 0xba, 0x00, 0x10, 0x7e, 0x00, 0x6e, 0xee, 0x3b, 0x26, 0x1c, + 0x64, 0x1f, 0x91, 0x6c, 0xb6, 0xcf, 0xa8, 0xa0, 0x96, 0x30, 0xa8, 0xf2, 0x63, 0x6e, 0x09, 0xb2, + 0xc7, 0xb5, 0x06, 0x33, 0x79, 0x9c, 0x79, 0x9b, 0x83, 0x65, 0xed, 0x13, 0xce, 0x5a, 0x87, 0xad, + 0x64, 0xed, 0x34, 0xcc, 0x92, 0x71, 0xb0, 0xba, 0x7e, 0xca, 0x8d, 0xb5, 0x43, 0x6f, 0x54, 0xab, + 0xfb, 0x20, 0x1c, 0x28, 0xd3, 0x79, 0x5e, 0xcb, 0x38, 0x43, 0xa6, 0x11, 0x79, 0x89, 0x83, 0xf9, + 0x3a, 0x99, 0xb9, 0xe3, 0x2f, 0x97, 0x82, 0x55, 0x2f, 0x41, 0xf9, 0xfd, 0xb0, 0x9f, 0xe5, 0x79, + 0x9c, 0xca, 0x96, 0xf2, 0xe3, 0xe0, 0x82, 0x6c, 0x3b, 0xa8, 0x3f, 0xeb, 0x29, 0xd5, 0x86, 0x81, + 0xa3, 0xf9, 0x24, 0xdc, 0x50, 0x9e, 0x55, 0x1a, 0x41, 0x94, 0xa8, 0x54, 0x5b, 0x8c, 0x9f, 0x73, + 0xa5, 0x4a, 0xee, 0x64, 0x81, 0x89, 0x65, 0x98, 0x28, 0x3e, 0xba, 0x3e, 0x92, 0x5f, 0x90, 0x68, + 0xbc, 0x4b, 0x51, 0xe3, 0x68, 0xa9, 0x28, 0xf1, 0x52, 0x97, 0xfe, 0xf7, 0x25, 0x37, 0x0e, 0x42, + 0xa8, 0x71, 0xe8, 0xad, 0x44, 0xe2, 0xb4, 0x77, 0x30, 0x7c, 0xc5, 0x8d, 0x83, 0x19, 0x52, 0xf0, + 0x81, 0xc1, 0x41, 0xf1, 0x35, 0x2b, 0x98, 0x41, 0xc5, 0x3d, 0xdd, 0x41, 0x9b, 0x4a, 0x3f, 0xc8, + 0x74, 0xea, 0xe1, 0x6a, 0x8b, 0xea, 0x9b, 0xed, 0xea, 0x21, 0x6c, 0xdd, 0x40, 0xc5, 0x29, 0x98, + 0xec, 0x39, 0x62, 0xd4, 0x6f, 0xdb, 0x63, 0x5b, 0x95, 0x59, 0xe6, 0xf9, 0xa5, 0xf0, 0xd1, 0x1d, + 0x6a, 0x46, 0xd5, 0x13, 0x86, 0xb8, 0x13, 0xeb, 0x5e, 0x3d, 0x07, 0xd8, 0x65, 0x17, 0x77, 0xca, + 0xd2, 0x57, 0x8e, 0x01, 0xe2, 0x04, 0x8c, 0x57, 0xce, 0x00, 0x76, 0xd5, 0x63, 0xa4, 0x1a, 0x33, + 0x8f, 0x00, 0xe2, 0x08, 0x0c, 0xe1, 0x3c, 0xb7, 0xe3, 0x8f, 0x13, 0x5e, 0x2c, 0x17, 0xc7, 0x60, + 0x84, 0xe7, 0xb8, 0x1d, 0x7d, 0x82, 0xd0, 0x12, 0x41, 0x9c, 0x67, 0xb8, 0x1d, 0x7f, 0x92, 0x71, + 0x46, 0x10, 0x77, 0x4f, 0xe1, 0xb7, 0x4f, 0x0f, 0x51, 0x1f, 0xe6, 0xdc, 0xcd, 0xc3, 0x30, 0x0d, + 0x6f, 0x3b, 0xfd, 0x14, 0xfd, 0x38, 0x13, 0xe2, 0x0e, 0xd8, 0xe7, 0x98, 0xf0, 0x67, 0x08, 0xed, + 0xac, 0x17, 0x4b, 0x30, 0x6a, 0x0c, 0x6c, 0x3b, 0xfe, 0x2c, 0xe1, 0x26, 0x85, 0xa1, 0xd3, 0xc0, + 0xb6, 0x0b, 0x9e, 0xe3, 0xd0, 0x89, 0xc0, 0xb4, 0xf1, 0xac, 0xb6, 0xd3, 0xcf, 0x73, 0xd6, 0x19, + 0x11, 0x0b, 0x50, 0x2b, 0xfb, 0xaf, 0x9d, 0x7f, 0x81, 0xf8, 0x2e, 0x83, 0x19, 0x30, 0xfa, 0xbf, + 0x5d, 0xf1, 0x22, 0x67, 0xc0, 0xa0, 0x70, 0x1b, 0xf5, 0xce, 0x74, 0xbb, 0xe9, 0x25, 0xde, 0x46, + 0x3d, 0x23, 0x1d, 0xab, 0x59, 0xb4, 0x41, 0xbb, 0xe2, 0x65, 0xae, 0x66, 0xb1, 0x1e, 0xc3, 0xe8, + 0x1d, 0x92, 0x76, 0xc7, 0x2b, 0x1c, 0x46, 0xcf, 0x8c, 0x14, 0x6b, 0x50, 0xdf, 0x3b, 0x20, 0xed, + 0xbe, 0x57, 0xc9, 0x37, 0xb5, 0x67, 0x3e, 0x8a, 0xfb, 0x60, 0xb6, 0xff, 0x70, 0xb4, 0x5b, 0x2f, + 0xed, 0xf4, 0xbc, 0xce, 0x98, 0xb3, 0x51, 0x9c, 0xee, 0x76, 0x59, 0x73, 0x30, 0xda, 0xb5, 0x97, + 0x77, 0xaa, 0x8d, 0xd6, 0x9c, 0x8b, 0x62, 0x11, 0xa0, 0x3b, 0x93, 0xec, 0xae, 0x2b, 0xe4, 0x32, + 0x20, 0xdc, 0x1a, 0x34, 0x92, 0xec, 0xfc, 0x55, 0xde, 0x1a, 0x44, 0xe0, 0xd6, 0xe0, 0x69, 0x64, + 0xa7, 0xaf, 0xf1, 0xd6, 0x60, 0x44, 0xcc, 0xc3, 0x48, 0x9c, 0x87, 0x21, 0x3e, 0x5b, 0xf5, 0x5b, + 0xfb, 0x8c, 0x1b, 0x19, 0xb6, 
0x19, 0xfe, 0x65, 0x97, 0x60, 0x06, 0xc4, 0x11, 0xd8, 0x27, 0xa3, + 0xa6, 0x6c, 0xdb, 0xc8, 0x5f, 0x77, 0xb9, 0x9f, 0xe0, 0x6a, 0xb1, 0x00, 0xd0, 0x79, 0x99, 0xc6, + 0x28, 0x6c, 0xec, 0x6f, 0xbb, 0x9d, 0xf7, 0x7a, 0x03, 0xe9, 0x0a, 0x8a, 0xb7, 0x71, 0x8b, 0x60, + 0xbb, 0x2a, 0x28, 0x5e, 0xc0, 0x8f, 0xc2, 0xf0, 0x43, 0x99, 0x8a, 0xb5, 0xe7, 0xdb, 0xe8, 0xdf, + 0x89, 0xe6, 0xf5, 0x98, 0xb0, 0x48, 0xa5, 0x52, 0x7b, 0x7e, 0x66, 0x63, 0xff, 0x20, 0xb6, 0x04, + 0x10, 0x6e, 0x79, 0x99, 0x76, 0xb9, 0xef, 0x3f, 0x19, 0x66, 0x00, 0x83, 0xc6, 0xeb, 0x87, 0xe5, + 0x96, 0x8d, 0xfd, 0x8b, 0x83, 0xa6, 0xf5, 0xe2, 0x18, 0xd4, 0xf0, 0xb2, 0xf8, 0x3f, 0x84, 0x0d, + 0xfe, 0x9b, 0xe0, 0x2e, 0x81, 0xbf, 0x9c, 0xe9, 0xb6, 0x0e, 0xec, 0xc9, 0xfe, 0x87, 0x2a, 0xcd, + 0xeb, 0xc5, 0x22, 0x8c, 0x66, 0xba, 0xdd, 0xce, 0xe9, 0x44, 0x63, 0xc1, 0xff, 0xdd, 0x2d, 0x5f, + 0x72, 0x4b, 0xe6, 0xf8, 0x21, 0x98, 0x6e, 0xa9, 0xa8, 0x17, 0x3c, 0x0e, 0x2b, 0x6a, 0x45, 0xad, + 0x15, 0xbb, 0xe8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x9c, 0xec, 0xd8, 0x50, 0x13, 0x00, + 0x00, } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto index 18a58c5d..fbca44cd 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -39,6 +39,7 @@ extend google.protobuf.EnumOptions { optional bool goproto_enum_stringer = 62021; optional bool enum_stringer = 62022; optional string enum_customname = 62023; + optional bool enumdecl = 62024; } extend google.protobuf.EnumValueOptions { @@ -77,6 +78,10 @@ extend google.protobuf.FileOptions { optional bool gogoproto_import = 63027; optional bool protosizer_all = 63028; optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; } extend google.protobuf.MessageOptions { @@ -107,6 +112,8 @@ extend google.protobuf.MessageOptions { optional bool protosizer = 64028; optional bool compare = 64029; + + optional bool typedecl = 64030; } extend google.protobuf.FieldOptions { @@ -119,4 +126,7 @@ extend google.protobuf.FieldOptions { optional string casttype = 65007; optional string castkey = 65008; optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go index 670021fe..6b851c56 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -39,6 +39,14 @@ func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { return proto.GetBoolExtension(field.Options, E_Nullable, true) } +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { nullable := IsNullable(field) if field.IsMessage() || IsCustomType(field) { @@ -82,7 +90,18 @@ func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { return false } +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + 
+func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Customtype) if err == nil && v.(*string) != nil { @@ -93,6 +112,9 @@ func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { } func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Casttype) if err == nil && v.(*string) != nil { @@ -103,6 +125,9 @@ func GetCastType(field *google_protobuf.FieldDescriptorProto) string { } func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Castkey) if err == nil && v.(*string) != nil { @@ -113,6 +138,9 @@ func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { } func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Castvalue) if err == nil && v.(*string) != nil { @@ -147,6 +175,9 @@ func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool } func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Customname) if err == nil && v.(*string) != nil { @@ -157,6 +188,9 @@ func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { } func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_EnumCustomname) if err == nil && v.(*string) != nil { @@ -167,6 +201,9 @@ func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { } func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) if err == nil && v.(*string) != nil { @@ -177,6 +214,9 @@ func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) str } func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Jsontag) if err == nil && v.(*string) != nil { @@ -187,6 +227,9 @@ func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { } func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Moretags) if err == nil && v.(*string) != nil { @@ -308,3 +351,7 @@ func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) } + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, 
E_GoprotoRegistration, false) +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go index 0d6634cc..737f2731 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 for shift := uint(0); shift < 64; shift += 7 { if n >= len(buf) { return 0, 0 @@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) { return 0, 0 } -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { i := p.index l := len(p.buf) @@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { return } +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + // DecodeFixed64 reads a 64-bit integer from the Buffer. // This is the format for the // fixed64, sfixed64, and double protocol buffer types. @@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be // unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. 
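	// The no-reset behavior documented above is the key difference from the
	// package-level helper; a sketch of the contrast (MyMsg is a hypothetical
	// message type):
	//
	//	var m MyMsg
	//	_ = proto.Unmarshal(data, &m)           // resets m, then merges
	//	_ = proto.NewBuffer(data).Unmarshal(&m) // merges into m as-is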
if u, ok := pb.(Unmarshaler); ok { diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go index ecc63873..6fb74de4 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go @@ -98,7 +98,7 @@ func setPtrCustomType(base structPointer, f field, v interface{}) { if v == nil { return } - structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer())) + structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) } func setCustomType(base structPointer, f field, value interface{}) { @@ -165,7 +165,8 @@ func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error } newBas := appendStructPointer(base, p.field, p.ctype) - setCustomType(newBas, 0, custom) + var zero field + setCustomType(newBas, zero, custom) return nil } diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 00000000..93464c91 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). 
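// (The "290" above is in years: time.Duration is an int64 count of
// nanoseconds, which overflows a little past ±292 years.)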
+func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 00000000..18e2a5f7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,203 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
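// Round-trip sketch for the helpers in duration.go above (package-internal
// names; illustrative values):
//
//	d := 90 * time.Minute
//	p := durationProto(d)             // &duration{Seconds: 5400, Nanos: 0}
//	back, err := durationFromProto(p) // back == 90*time.Minute, err == nil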
+ +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} + +func (o *Buffer) decDuration() (time.Duration, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return 0, err + } + dproto := &duration{} + if err := Unmarshal(b, dproto); err != nil { + return 0, err + } + return durationFromProto(dproto) +} + +func (o *Buffer) dec_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) + var zero field + setPtrCustomType(newBas, zero, &d) + return nil +} + +func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + structPointer_Word64Slice(base, p.field).Append(uint64(d)) + return nil +} + +func size_duration(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_duration(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_duration(p *Properties, base structPointer) (n int) { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
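	// p.tagcode already holds the encoded field number with wire type 2
	// (length-delimited); EncodeRawBytes then writes the varint length
	// followed by the marshaled duration message.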
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return 0 + } + dproto := durationProto(*durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return errRepeatedHasNil + } + dproto := durationProto(*durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go index 8c1b8fd1..2b30f846 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -234,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) { } p := NewBuffer(nil) err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } if p.buf == nil && err == nil { // Return a non-nil slice on success. return []byte{}, nil @@ -266,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error { // Can the object marshal itself? if m, ok := pb.(Marshaler); ok { data, err := m.Marshal() - if err != nil { - return err - } p.buf = append(p.buf, data...) - return nil + return err } t, base, err := getbase(pb) @@ -282,7 +275,7 @@ func (p *Buffer) Marshal(pb Message) error { } if collectStats { - stats.Encode++ + (stats).Encode++ // Parens are to work around a goimports bug. } if len(p.buf) > maxMarshalSize { @@ -309,7 +302,7 @@ func Size(pb Message) (n int) { } if collectStats { - stats.Size++ + (stats).Size++ // Parens are to work around a goimports bug. 
} return @@ -1014,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) { if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, _ := m.Marshal() - n += len(p.tagcode) n += sizeRawBytes(data) continue } @@ -1083,10 +1075,17 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error { func (o *Buffer) enc_exts(p *Properties, base structPointer) error { exts := structPointer_Extensions(base, p.field) - if err := encodeExtensions(exts); err != nil { + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { return err } - v, _ := exts.extensionsRead() return o.enc_map_body(v) } diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go index 66e7e163..32111b7f 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -196,12 +196,10 @@ func size_ref_struct_message(p *Properties, base structPointer) int { // Encode a slice of references to message struct pointers ([]struct). func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { var state errorState - ss := structPointer_GetStructPointer(base, p.field) - ss1 := structPointer_GetRefStructPointer(ss, field(0)) - size := p.stype.Size() - l := structPointer_Len(base, p.field) + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() for i := 0; i < l; i++ { - structp := structPointer_Add(ss1, field(uintptr(i)*size)) + structp := ss.Index(i) if structPointer_IsNil(structp) { return errRepeatedHasNil } @@ -233,13 +231,11 @@ func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) //TODO this is only copied, please fix this func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { - ss := structPointer_GetStructPointer(base, p.field) - ss1 := structPointer_GetRefStructPointer(ss, field(0)) - size := p.stype.Size() - l := structPointer_Len(base, p.field) + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() n += l * len(p.tagcode) for i := 0; i < l; i++ { - structp := structPointer_Add(ss1, field(uintptr(i)*size)) + structp := ss.Index(i) if structPointer_IsNil(structp) { return // return the size up to this point } diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go index 8b16f951..2ed1cf59 100644 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -54,13 +54,17 @@ Equality is defined in this way: in a proto3 .proto file, fields are not "set"; specifically, zero length proto3 "bytes" fields are equal (nil == {}). - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal (a "bytes" field, - although represented by []byte, is not a repeated field) + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. - Two unset fields are equal. - Two unknown field sets are equal if their current encoded state is equal. - Two extension sets are equal iff they have corresponding elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. 
- Every other combination of things are not equal. The return value is undefined if a and b are not protocol buffers. diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go index f7384baa..0dfcb538 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -167,6 +167,7 @@ type ExtensionDesc struct { Field int32 // field number Name string // fully-qualified name of extension, for text formatting Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined } func (ed *ExtensionDesc) repeated() bool { @@ -587,6 +588,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { registeredExtensions := RegisteredExtensions(pb) emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } mu.Lock() defer mu.Unlock() extensions := make([]*ExtensionDesc, 0, len(emap)) diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go index 2c30d709..7580bb45 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -308,7 +308,7 @@ func GetStats() Stats { return stats } // temporary Buffer and are fine for most applications. type Buffer struct { buf []byte // encode/decode byte stream - index int // write point + index int // read point // pools of basic types to amortize allocation. bools []bool diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 00000000..1763a5f2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,85 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
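// Build-constraint pairing (informational): the `// +build appengine js` line
// below selects this panicking reflect-based stub, while
// pointer_unsafe_gogo.go switches to `// +build !appengine,!js`, so exactly
// one implementation of the structPointer helpers compiles for a given target.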
+ +// +build appengine js + +package proto + +import ( + "reflect" +) + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + panic("not implemented") +} + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func structPointer_Add(p structPointer, size field) structPointer { + panic("not implemented") +} + +func structPointer_Len(p structPointer, f field) int { + panic("not implemented") +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + panic("not implemented") +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + panic("not implemented") +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + panic("not implemented") +} + +type structRefSlice struct{} + +func (v *structRefSlice) Len() int { + panic("not implemented") +} + +func (v *structRefSlice) Index(i int) structPointer { + panic("not implemented") +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go index ad7c8517..f156a29f 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -26,7 +26,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine +// +build !appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -105,3 +105,24 @@ func structPointer_Add(p structPointer, size field) structPointer { func structPointer_Len(p structPointer, f field) int { return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) } + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + return &structRefSlice{p: p, f: f, size: size} +} + +// A structRefSlice represents a slice of structs (themselves submessages or groups). 
+type structRefSlice struct { + p structPointer + f field + size uintptr +} + +func (v *structRefSlice) Len() int { + return structPointer_Len(v.p, v.f) +} + +func (v *structRefSlice) Index(i int) structPointer { + ss := structPointer_GetStructPointer(v.p, v.f) + ss1 := structPointer_GetRefStructPointer(ss, 0) + return structPointer_Add(ss1, field(uintptr(i)*v.size)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go index 3e4cad03..44b33205 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -190,10 +190,11 @@ type Properties struct { proto3 bool // whether this is known to be a proto3 field; set for []byte only oneof bool // whether this is a oneof field - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - def_uint64 uint64 + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + StdTime bool + StdDuration bool enc encoder valEnc valueEncoder // set for bool and numeric types only @@ -340,6 +341,10 @@ func (p *Properties) Parse(s string) { p.OrigName = strings.Split(f, "=")[1] case strings.HasPrefix(f, "customtype="): p.CustomType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true } } } @@ -355,11 +360,22 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.enc = nil p.dec = nil p.size = nil - if len(p.CustomType) > 0 { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { p.setCustomEncAndDec(typ) p.setTag(lockGetProp) return } + if p.StdTime && !isMap { + p.setTimeEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setDurationEncAndDec(typ) + p.setTag(lockGetProp) + return + } switch t1 := typ; t1.Kind() { default: fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) @@ -630,6 +646,10 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock // so we need encoders for the pointer to this type. vtype = reflect.PtrTo(vtype) } + + p.mvalprop.CustomType = p.CustomType + p.mvalprop.StdDuration = p.StdDuration + p.mvalprop.StdTime = p.StdTime p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } p.setTag(lockGetProp) @@ -920,7 +940,15 @@ func RegisterType(x Message, name string) { } // MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} // MessageType returns the message type (pointer to struct) for a named message. 
func MessageType(name string) reflect.Type { return protoTypes[name] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go index 4607a975..b6b7176c 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -51,6 +51,51 @@ func (p *Properties) setCustomEncAndDec(typ reflect.Type) { } } +func (p *Properties) setDurationEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_duration + p.dec = (*Buffer).dec_slice_duration + p.size = size_slice_duration + } else { + p.enc = (*Buffer).enc_slice_ref_duration + p.dec = (*Buffer).dec_slice_ref_duration + p.size = size_slice_ref_duration + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_duration + p.dec = (*Buffer).dec_duration + p.size = size_duration + } else { + p.enc = (*Buffer).enc_ref_duration + p.dec = (*Buffer).dec_ref_duration + p.size = size_ref_duration + } +} + +func (p *Properties) setTimeEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_time + p.dec = (*Buffer).dec_slice_time + p.size = size_slice_time + } else { + p.enc = (*Buffer).enc_slice_ref_time + p.dec = (*Buffer).dec_slice_ref_time + p.size = size_slice_ref_time + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_time + p.dec = (*Buffer).dec_time + p.size = size_time + } else { + p.enc = (*Buffer).enc_ref_time + p.dec = (*Buffer).dec_ref_time + p.size = size_ref_time + } + +} + func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { t2 := typ.Elem() p.sstype = typ diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go index b3e12e26..d63732fc 100644 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -51,6 +51,7 @@ import ( "sort" "strings" "sync" + "time" ) var ( @@ -181,7 +182,93 @@ type raw interface { Bytes() []byte } -func writeStruct(w *textWriter, sv reflect.Value) error { +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
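//
// Illustrative expanded output (the type URL and field are hypothetical):
//
//	[type.googleapis.com/google.profile.Person]: <
//	  first_name: "Ada"
//	>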
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { @@ -234,10 +321,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error { continue } if len(props.Enum) > 0 { - if err := writeEnum(w, v, props); err != nil { + if err := tm.writeEnum(w, v, props); err != nil { return err } - } else if err := writeAny(w, v, props); err != nil { + } else if err := tm.writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -279,7 +366,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -296,7 +383,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -368,10 +455,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } if len(props.Enum) > 0 { - if err := writeEnum(w, fv, props); err != nil { + if err := tm.writeEnum(w, fv, props); err != nil { return err } - } else if err := writeAny(w, fv, props); err != nil { + } else if err := tm.writeAny(w, fv, props); err != nil { return err } @@ -389,7 +476,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { pv.Elem().Set(sv) } if pv.Type().Implements(extensionRangeType) { - if err := writeExtensions(w, pv); err != nil { + if err := tm.writeExtensions(w, pv); err != nil { return err } } @@ -419,20 +506,45 @@ func writeRaw(w *textWriter, b []byte) error { } // writeAny writes an arbitrary field. 
-func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
 	v = reflect.Indirect(v)
 
-	if props != nil && len(props.CustomType) > 0 {
-		custom, ok := v.Interface().(Marshaler)
-		if ok {
-			data, err := custom.Marshal()
+	if props != nil {
+		if len(props.CustomType) > 0 {
+			custom, ok := v.Interface().(Marshaler)
+			if ok {
+				data, err := custom.Marshal()
+				if err != nil {
+					return err
+				}
+				if err := writeString(w, string(data)); err != nil {
+					return err
+				}
+				return nil
+			}
+		} else if props.StdTime {
+			t, ok := v.Interface().(time.Time)
+			if !ok {
+				return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
+			}
+			tproto, err := timestampProto(t)
 			if err != nil {
 				return err
 			}
-			if err := writeString(w, string(data)); err != nil {
-				return err
+			props.StdTime = false
+			err = tm.writeAny(w, reflect.ValueOf(tproto), props)
+			props.StdTime = true
+			return err
+		} else if props.StdDuration {
+			d, ok := v.Interface().(time.Duration)
+			if !ok {
+				return fmt.Errorf("stdduration is not time.Duration, but %T", v.Interface())
 			}
-			return nil
+			dproto := durationProto(d)
+			props.StdDuration = false
+			err := tm.writeAny(w, reflect.ValueOf(dproto), props)
+			props.StdDuration = true
+			return err
 		}
 	}
@@ -482,15 +594,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
 		}
 	}
 	w.indent()
-	if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
-		text, err := tm.MarshalText()
+	if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
 		if err != nil {
 			return err
 		}
 		if _, err = w.Write(text); err != nil {
 			return err
 		}
-	} else if err := writeStruct(w, v); err != nil {
+	} else if err := tm.writeStruct(w, v); err != nil {
 		return err
 	}
 	w.unindent()
@@ -634,7 +746,7 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
 
 // writeExtensions writes all the extensions in pv.
 // pv is assumed to be a pointer to a protocol message struct that is extendable.
-func writeExtensions(w *textWriter, pv reflect.Value) error {
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
 	emap := extensionMaps[pv.Type().Elem()]
 	e := pv.Interface().(Message)
 
@@ -689,13 +801,13 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
 
 		// Repeated extensions will appear as a slice.
 		if !desc.repeated() {
-			if err := writeExtension(w, desc.Name, pb); err != nil {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
 				return err
 			}
 		} else {
 			v := reflect.ValueOf(pb)
 			for i := 0; i < v.Len(); i++ {
-				if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
 					return err
 				}
 			}
@@ -704,7 +816,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
 	return nil
 }
 
-func writeExtension(w *textWriter, name string, pb interface{}) error {
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
 	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
 		return err
 	}
@@ -713,7 +825,7 @@ func writeExtension(w *textWriter, name string, pb interface{}) error {
 			return err
 		}
 	}
-	if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
 		return err
 	}
 	if err := w.WriteByte('\n'); err != nil {
@@ -740,12 +852,13 @@ func (w *textWriter) writeIndent() {
 
 // TextMarshaler is a configurable text format marshaler.
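//
// Typical use of the new ExpandAny knob (a sketch; msg is any proto.Message):
//
//	tm := &proto.TextMarshaler{ExpandAny: true}
//	s := tm.Text(msg) // Any fields of registered types print expanded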
type TextMarshaler struct { - Compact bool // use compact text format (one line). + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types } // Marshal writes a given protocol buffer in text format. // The only errors returned are from w. -func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("")) @@ -760,11 +873,11 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { aw := &textWriter{ w: ww, complete: true, - compact: m.Compact, + compact: tm.Compact, } - if tm, ok := pb.(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } @@ -778,7 +891,7 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) - if err := writeStruct(aw, v); err != nil { + if err := tm.writeStruct(aw, v); err != nil { return err } if bw != nil { @@ -788,9 +901,9 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { } // Text is the same as Marshal, but returns the string directly. -func (m *TextMarshaler) Text(pb Message) string { +func (tm *TextMarshaler) Text(pb Message) string { var buf bytes.Buffer - m.Marshal(&buf, pb) + tm.Marshal(&buf, pb) return buf.String() } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go index 58926741..1d6c6aa0 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -33,10 +33,10 @@ import ( "reflect" ) -func writeEnum(w *textWriter, v reflect.Value, props *Properties) error { +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { m, ok := enumStringMaps[props.Enum] if !ok { - if err := writeAny(w, v, props); err != nil { + if err := tm.writeAny(w, v, props); err != nil { return err } } @@ -48,7 +48,7 @@ func writeEnum(w *textWriter, v reflect.Value, props *Properties) error { } s, ok := m[key] if !ok { - if err := writeAny(w, v, props); err != nil { + if err := tm.writeAny(w, v, props); err != nil { return err } } diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go index bcd732c3..9db12e96 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -46,9 +46,13 @@ import ( "reflect" "strconv" "strings" + "time" "unicode/utf8" ) +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + type ParseError struct { Message string Line int // 1-based line number @@ -168,7 +172,7 @@ func (p *textParser) advance() { p.cur.offset, p.cur.line = p.offset, p.line p.cur.unquoted = "" switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',': + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': // Single symbol p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] case '"', '\'': @@ -456,7 +460,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. 
A name may also be - // "[extension]". + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > for { tok := p.next() if tok.err != nil { @@ -466,33 +473,74 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { break } if tok.value == "[" { - // Looks like an extension. + // Looks like an extension or an Any. // // TODO: Check whether we need to handle // namespace rooted names (e.g. ".something.Foo"). - tok = p.next() - if tok.err != nil { - return tok.err + extName, err := p.consumeExtName() + if err != nil { + return err } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + var desc *ExtensionDesc // This could be faster, but it's functional. // TODO: Do something smarter than a linear scan. for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == tok.value { + if d.Name == extName { desc = d break } } if desc == nil { - return p.errorf("unrecognized extension %q", tok.value) - } - // Check the extension terminator. - tok = p.next() - if tok.err != nil { - return tok.err - } - if tok.value != "]" { - return p.errorf("unrecognized extension terminator %q", tok.value) + return p.errorf("unrecognized extension %q", extName) } props := &Properties{} @@ -550,7 +598,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { props = oop.Prop nv := reflect.New(oop.Type.Elem()) dst = nv.Elem().Field(0) - sv.Field(oop.Field).Set(nv) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) } if !dst.IsValid() { return p.errorf("unknown field name %q in %v", name, st) @@ -657,6 +709,35 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { return reqFieldErr } +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. 
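	// For example (hypothetical names), ["my.pkg.ext"] arrives as one quoted
	// token, while [type.googleapis.com/my.pkg.Msg] arrives as several tokens
	// ('/' is tokenized as a single symbol above) that are joined back below.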
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
 // consumeOptionalSeparator consumes an optional semicolon or comma.
 // It is used in readStruct to provide backward compatibility.
 func (p *textParser) consumeOptionalSeparator() error {
@@ -717,6 +798,80 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
 		}
 		return nil
 	}
+	if props.StdTime {
+		fv := v
+		p.back()
+		props.StdTime = false
+		tproto := &timestamp{}
+		err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+		props.StdTime = true
+		if err != nil {
+			return err
+		}
+		tim, err := timestampFromProto(tproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ts := fv.Interface().([]*time.Time)
+					ts = append(ts, &tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				} else {
+					ts := fv.Interface().([]time.Time)
+					ts = append(ts, tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&tim))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+		}
+		return nil
+	}
+	if props.StdDuration {
+		fv := v
+		p.back()
+		props.StdDuration = false
+		dproto := &duration{}
+		err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+		props.StdDuration = true
+		if err != nil {
+			return err
+		}
+		dur, err := durationFromProto(dproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ds := fv.Interface().([]*time.Duration)
+					ds = append(ds, &dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				} else {
+					ds := fv.Interface().([]time.Duration)
+					ds = append(ds, dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&dur))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+		}
+		return nil
+	}
 	switch fv := v; fv.Kind() {
 	case reflect.Slice:
 		at := v.Type()
@@ -759,12 +914,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
 		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
 		return p.readAny(fv.Index(fv.Len()-1), props)
 	case reflect.Bool:
-		// Either "true", "false", 1 or 0.
+		// true/1/t/True or false/f/0/False.
 		switch tok.value {
-		case "true", "1":
+		case "true", "1", "t", "True":
 			fv.SetBool(true)
 			return nil
-		case "false", "0":
+		case "false", "0", "f", "False":
 			fv.SetBool(false)
 			return nil
 		}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go
new file mode 100644
index 00000000..9324f654
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go
@@ -0,0 +1,113 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. 
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 00000000..d4276474
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,229 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
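// Round-trip sketch for the helpers in timestamp.go above (package-internal
// names; illustrative values):
//
//	t := time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC)
//	ts, err := timestampProto(t)      // &timestamp{Seconds: 1483326245, Nanos: 6}
//	back, _ := timestampFromProto(ts) // back.Equal(t) == true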
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+
+type timestamp struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *timestamp) Reset()       { *m = timestamp{} }
+func (*timestamp) ProtoMessage()  {}
+func (*timestamp) String() string { return "timestamp" }
+
+func init() {
+	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
+}
+
+func (o *Buffer) decTimestamp() (time.Time, error) {
+	b, err := o.DecodeRawBytes(true)
+	if err != nil {
+		return time.Time{}, err
+	}
+	tproto := &timestamp{}
+	if err := Unmarshal(b, tproto); err != nil {
+		return time.Time{}, err
+	}
+	return timestampFromProto(tproto)
+}
+
+func (o *Buffer) dec_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	setPtrCustomType(base, p.field, &t)
+	return nil
+}
+
+func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	setCustomType(base, p.field, &t)
+	return nil
+}
+
+func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
+	var zero field
+	setPtrCustomType(newBas, zero, &t)
+	return nil
+}
+
+func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
+	var zero field
+	setCustomType(newBas, zero, &t)
+	return nil
+}
+
+func size_time(p *Properties, base structPointer) (n int) {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return 0
+	}
+	tim := structPointer_Interface(structp, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return 0
+	}
+	size := Size(t)
+	return size + sizeVarint(uint64(size)) + len(p.tagcode)
+}
+
+func (o *Buffer) enc_time(p *Properties, base structPointer) error {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return ErrNil
+	}
+	tim := structPointer_Interface(structp, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return err
+	}
+	data, err := Marshal(t)
+	if err != nil {
+		return err
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeRawBytes(data)
+	return nil
+}
+
+func size_ref_time(p *Properties, base structPointer) (n int) {
+	tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return 0
+	}
+	size := Size(t)
+	return size + sizeVarint(uint64(size)) + len(p.tagcode)
+}
+
+func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error {
+	tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return err
+	}
+	data, err := Marshal(t)
+	if err != nil {
+		return err
+	}
+	o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return 0 + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return errRepeatedHasNil + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto new file mode 100644 index 00000000..7eaf2291 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto @@ -0,0 +1,139 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto index acaee1f4..6e4da2c1 100644 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto @@ -53,6 +53,16 @@ option go_package = "plugin_go"; import "google/protobuf/descriptor.proto"; +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + // An encoded CodeGeneratorRequest is written to the plugin's stdin. message CodeGeneratorRequest { // The .proto files that were explicitly listed on the command-line. The @@ -75,6 +85,9 @@ message CodeGeneratorRequest { // is not similarly optimized on protoc's end -- it will store all fields in // memory at once before sending them to the plugin. repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; } // The plugin writes an encoded CodeGeneratorResponse to stdout. diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto index c59a6022..2cc496b5 100644 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto @@ -139,7 +139,11 @@ message FieldDescriptorProto { TYPE_FIXED32 = 7; TYPE_BOOL = 8; TYPE_STRING = 9; - TYPE_GROUP = 10; // Tag-delimited aggregate. + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; TYPE_MESSAGE = 11; // Length-delimited aggregate. // New in version 2. @@ -157,7 +161,6 @@ message FieldDescriptorProto { LABEL_OPTIONAL = 1; LABEL_REQUIRED = 2; LABEL_REPEATED = 3; - // TODO(sanjay): Should we add LABEL_MAP? }; optional string name = 1; @@ -202,6 +205,7 @@ message FieldDescriptorProto { // Describes a oneof. message OneofDescriptorProto { optional string name = 1; + optional OneofOptions options = 2; } // Describes an enum type. @@ -304,19 +308,8 @@ message FileOptions { // top-level extensions defined in the file. optional bool java_multiple_files = 10 [default=false]; - // If set true, then the Java code generator will generate equals() and - // hashCode() methods for all messages defined in the .proto file. - // This increases generated code size, potentially substantially for large - // protos, which may harm a memory-constrained application. 
- // - In the full runtime this is a speed optimization, as the - // AbstractMessage base class includes reflection-based implementations of - // these methods. - // - In the lite runtime, setting this option changes the semantics of - // equals() and hashCode() to more closely match those of the full runtime; - // the generated methods compute their results based on field values rather - // than object identity. (Implementations should not assume that hashcodes - // will be consistent across runtimes or versions of the protocol compiler.) - optional bool java_generate_equals_and_hash = 20 [default=false]; + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 @@ -377,15 +370,19 @@ message FileOptions { // Namespace for generated classes; defaults to the package. optional string csharp_namespace = 37; - // Whether the nano proto compiler should generate in the deprecated non-nano - // suffixed package. - optional bool javanano_use_deprecated_package = 38; + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; + + //reserved 38; } message MessageOptions { @@ -443,6 +440,9 @@ message MessageOptions { // parser. optional bool map_entry = 7; + //reserved 8; // javalite_serializable + + // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -471,7 +471,6 @@ message FieldOptions { // false will avoid using packed encoding. optional bool packed = 2; - // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types // (int64, uint64, sint64, fixed64, sfixed64). By default these types are @@ -512,7 +511,7 @@ message FieldOptions { // // // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outher message + // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. // This is necessary because otherwise the inner message would have to be // parsed in order to perform the check, defeating the purpose of lazy @@ -533,6 +532,16 @@ message FieldOptions { optional bool weak = 10 [default=false]; + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + //reserved 4; // removed jtype +} + +message OneofOptions { // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -552,6 +561,7 @@ message EnumOptions { // is a formalization for deprecating enums. optional bool deprecated = 3 [default=false]; + // The parser stores options it doesn't recognize here. See above. 
repeated UninterpretedOption uninterpreted_option = 999; @@ -606,6 +616,17 @@ message MethodOptions { // this is a formalization for deprecating methods. optional bool deprecated = 33 [default=false]; + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -777,3 +798,29 @@ message SourceCodeInfo { repeated string leading_detached_comments = 6; } } + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto new file mode 100644 index 00000000..318922b7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto @@ -0,0 +1,104 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto new file mode 100644 index 00000000..6057c852 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto new file mode 100644 index 00000000..994af79f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto @@ -0,0 +1,246 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "FieldMaskProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. 
+// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a `paths` +// string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +message FieldMask { + // The set of field mask paths. 
+ repeated string paths = 1; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto new file mode 100644 index 00000000..4f78641f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map<string, Value> fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of those +// variants; absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value.
+ Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto new file mode 100644 index 00000000..c544c83e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto @@ -0,0 +1,108 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. 
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto new file mode 100644 index 00000000..c5632e5c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 00000000..a85bf198 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? 
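+// A hedged illustration (not part of the vendored file) of using ForMessage +// and IsScalar together, where pb.MyMessage stands in for any generated +// message type: +// +// _, md := descriptor.ForMessage(&pb.MyMessage{}) +// for _, f := range md.GetField() { +// if f.IsScalar() { +// // numeric, bool, or enum field; string, bytes, message and +// // group fields return false +// } +// }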
+func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go index 341b59c5..e2703901 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -21,12 +21,14 @@ It has these top-level messages: FileOptions MessageOptions FieldOptions + OneofOptions EnumOptions EnumValueOptions ServiceOptions MethodOptions UninterpretedOption SourceCodeInfo + GeneratedCodeInfo */ package descriptor @@ -63,6 +65,10 @@ const ( FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // New in version 2. @@ -299,6 +305,48 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11, 1} } +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
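+// A hedged usage sketch (not part of this file): in a .proto service +// definition the option is set per method, for example +// +// service Library { +// rpc GetBook(GetBookRequest) returns (Book) { +// option idempotency_level = NO_SIDE_EFFECTS; +// } +// } +// +// where Library, GetBook, GetBookRequest and Book are illustrative names.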
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{16, 0} +} + // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { @@ -696,8 +744,9 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } @@ -712,6 +761,13 @@ func (m *OneofDescriptorProto) GetName() string { return "" } +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + // Describes an enum type. type EnumDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -900,19 +956,8 @@ type FileOptions struct { // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // If set true, then the Java code generator will generate equals() and - // hashCode() methods for all messages defined in the .proto file. - // This increases generated code size, potentially substantially for large - // protos, which may harm a memory-constrained application. - // - In the full runtime this is a speed optimization, as the - // AbstractMessage base class includes reflection-based implementations of - // these methods. - // - In the lite runtime, setting this option changes the semantics of - // equals() and hashCode() to more closely match those of the full runtime; - // the generated methods compute their results based on field values rather - // than object identity. (Implementations should not assume that hashcodes - // will be consistent across runtimes or versions of the protocol compiler.) - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"` + // This option does nothing. 
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. @@ -953,9 +998,11 @@ type FileOptions struct { ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` // Namespace for generated classes; defaults to the package. CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // Whether the nano proto compiler should generate in the deprecated non-nano - // suffixed package. - JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package,json=javananoUseDeprecatedPackage" json:"javanano_use_deprecated_package,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` proto.XXX_InternalExtensions `json:"-"` @@ -976,7 +1023,6 @@ func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { } const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaGenerateEqualsAndHash bool = false const Default_FileOptions_JavaStringCheckUtf8 bool = false const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED const Default_FileOptions_CcGenericServices bool = false @@ -1010,7 +1056,7 @@ func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { if m != nil && m.JavaGenerateEqualsAndHash != nil { return *m.JavaGenerateEqualsAndHash } - return Default_FileOptions_JavaGenerateEqualsAndHash + return false } func (m *FileOptions) GetJavaStringCheckUtf8() bool { @@ -1083,11 +1129,11 @@ func (m *FileOptions) GetCsharpNamespace() string { return "" } -func (m *FileOptions) GetJavananoUseDeprecatedPackage() bool { - if m != nil && m.JavananoUseDeprecatedPackage != nil { - return *m.JavananoUseDeprecatedPackage +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix } - return false + return "" } func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { @@ -1247,7 +1293,7 @@ type FieldOptions struct { // // // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outher message + // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. // This is necessary because otherwise the inner message would have to be // parsed in order to perform the check, defeating the purpose of lazy @@ -1338,6 +1384,33 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { return nil } +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + type EnumOptions struct { // Set this option to true to allow mapping different tag names to the same // value. @@ -1356,7 +1429,7 @@ type EnumOptions struct { func (m *EnumOptions) Reset() { *m = EnumOptions{} } func (m *EnumOptions) String() string { return proto.CompactTextString(m) } func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } var extRange_EnumOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1404,7 +1477,7 @@ type EnumValueOptions struct { func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } var extRange_EnumValueOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1445,7 +1518,7 @@ type ServiceOptions struct { func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } var extRange_ServiceOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1476,7 +1549,8 @@ type MethodOptions struct { // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` proto.XXX_InternalExtensions `json:"-"` @@ -1486,7 +1560,7 @@ type MethodOptions struct { func (m *MethodOptions) Reset() { *m = MethodOptions{} } func (m *MethodOptions) String() string { return proto.CompactTextString(m) } func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } var extRange_MethodOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1497,6 +1571,7 @@ func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { } const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN func (m *MethodOptions) GetDeprecated() bool { if m != nil && m.Deprecated != nil { @@ -1505,6 +1580,13 @@ func (m *MethodOptions) GetDeprecated() bool { return Default_MethodOptions_Deprecated } +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { return m.UninterpretedOption @@ -1534,7 +1616,7 @@ type UninterpretedOption struct { func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1600,7 +1682,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{16, 0} + return fileDescriptorDescriptor, []int{17, 0} } func (m *UninterpretedOption_NamePart) GetNamePart() string { @@ -1670,7 +1752,7 @@ type SourceCodeInfo struct { func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{18} } func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1767,7 +1849,7 @@ func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{17, 0} + return fileDescriptorDescriptor, []int{18, 0} } func (m *SourceCodeInfo_Location) GetPath() []int32 { @@ -1805,6 +1887,79 @@ func (m *SourceCodeInfo_Location) 
GetLeadingDetachedComments() []string { return nil } +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
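+	// For example, Begin=10 and End=14 select the four bytes at offsets
+	// 10 through 13 of the generated file; 14 is one past the last byte,
+	// matching length = end - begin.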
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{19, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + func init() { proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") @@ -1820,6 +1975,7 @@ func init() { proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") @@ -1828,154 +1984,167 @@ func init() { proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) } var fileDescriptorDescriptor = []byte{ - // 2211 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xc6, - 0x15, 0x0f, 0xf8, 0x4f, 0xe4, 0x23, 0x45, 0xad, 0x56, 0x8a, 0x03, 0xcb, 0x76, 0x2c, 0x33, 0x76, - 0x2c, 0xdb, 0xad, 0x9c, 0x91, 0xff, 0x44, 0x51, 0x3a, 0xe9, 0x50, 0x24, 0xac, 0xd0, 0x43, 0x89, - 0x2c, 0x28, 0xb6, 0x4e, 0x2e, 0x98, 0x15, 0xb0, 0xa4, 0x60, 0x83, 0x0b, 0x14, 0x00, 0x6d, 
0x2b, - 0x27, 0xcf, 0xf4, 0xd4, 0x6f, 0xd0, 0x69, 0x3b, 0x3d, 0xe4, 0x92, 0x99, 0x7e, 0x80, 0x1e, 0x7a, - 0xef, 0xb5, 0x87, 0x9e, 0x7b, 0xec, 0x4c, 0xfb, 0x0d, 0x7a, 0xed, 0xec, 0x2e, 0x00, 0x82, 0x7f, - 0x14, 0xab, 0x99, 0x49, 0xd3, 0x93, 0xb4, 0xbf, 0xf7, 0x7b, 0x8f, 0x6f, 0xdf, 0xfe, 0xb0, 0xef, - 0x01, 0x80, 0x2c, 0x1a, 0x98, 0xbe, 0xed, 0x85, 0xae, 0xbf, 0xed, 0xf9, 0x6e, 0xe8, 0xe2, 0x95, - 0xa1, 0xeb, 0x0e, 0x1d, 0x2a, 0x57, 0x27, 0xe3, 0x41, 0xed, 0x10, 0x56, 0x9f, 0xd8, 0x0e, 0x6d, - 0x26, 0xc4, 0x1e, 0x0d, 0xf1, 0x2e, 0xe4, 0x06, 0xb6, 0x43, 0x55, 0x65, 0x33, 0xbb, 0x55, 0xde, - 0xb9, 0xb9, 0x3d, 0xe3, 0xb4, 0x3d, 0xed, 0xd1, 0xe5, 0xb0, 0x2e, 0x3c, 0x6a, 0xff, 0xc8, 0xc1, - 0xda, 0x02, 0x2b, 0xc6, 0x90, 0x63, 0x64, 0xc4, 0x23, 0x2a, 0x5b, 0x25, 0x5d, 0xfc, 0x8f, 0x55, - 0x58, 0xf2, 0x88, 0xf9, 0x82, 0x0c, 0xa9, 0x9a, 0x11, 0x70, 0xbc, 0xc4, 0xef, 0x03, 0x58, 0xd4, - 0xa3, 0xcc, 0xa2, 0xcc, 0x3c, 0x53, 0xb3, 0x9b, 0xd9, 0xad, 0x92, 0x9e, 0x42, 0xf0, 0x3d, 0x58, - 0xf5, 0xc6, 0x27, 0x8e, 0x6d, 0x1a, 0x29, 0x1a, 0x6c, 0x66, 0xb7, 0xf2, 0x3a, 0x92, 0x86, 0xe6, - 0x84, 0x7c, 0x1b, 0x56, 0x5e, 0x51, 0xf2, 0x22, 0x4d, 0x2d, 0x0b, 0x6a, 0x95, 0xc3, 0x29, 0x62, - 0x03, 0x2a, 0x23, 0x1a, 0x04, 0x64, 0x48, 0x8d, 0xf0, 0xcc, 0xa3, 0x6a, 0x4e, 0xec, 0x7e, 0x73, - 0x6e, 0xf7, 0xb3, 0x3b, 0x2f, 0x47, 0x5e, 0xc7, 0x67, 0x1e, 0xc5, 0x75, 0x28, 0x51, 0x36, 0x1e, - 0xc9, 0x08, 0xf9, 0x73, 0xea, 0xa7, 0xb1, 0xf1, 0x68, 0x36, 0x4a, 0x91, 0xbb, 0x45, 0x21, 0x96, - 0x02, 0xea, 0xbf, 0xb4, 0x4d, 0xaa, 0x16, 0x44, 0x80, 0xdb, 0x73, 0x01, 0x7a, 0xd2, 0x3e, 0x1b, - 0x23, 0xf6, 0xc3, 0x0d, 0x28, 0xd1, 0xd7, 0x21, 0x65, 0x81, 0xed, 0x32, 0x75, 0x49, 0x04, 0xb9, - 0xb5, 0xe0, 0x14, 0xa9, 0x63, 0xcd, 0x86, 0x98, 0xf8, 0xe1, 0xc7, 0xb0, 0xe4, 0x7a, 0xa1, 0xed, - 0xb2, 0x40, 0x2d, 0x6e, 0x2a, 0x5b, 0xe5, 0x9d, 0xab, 0x0b, 0x85, 0xd0, 0x91, 0x1c, 0x3d, 0x26, - 0xe3, 0x16, 0xa0, 0xc0, 0x1d, 0xfb, 0x26, 0x35, 0x4c, 0xd7, 0xa2, 0x86, 0xcd, 0x06, 0xae, 0x5a, - 0x12, 0x01, 0xae, 0xcf, 0x6f, 0x44, 0x10, 0x1b, 0xae, 0x45, 0x5b, 0x6c, 0xe0, 0xea, 0xd5, 0x60, - 0x6a, 0x8d, 0x2f, 0x41, 0x21, 0x38, 0x63, 0x21, 0x79, 0xad, 0x56, 0x84, 0x42, 0xa2, 0x55, 0xed, - 0xdf, 0x79, 0x58, 0xb9, 0x88, 0xc4, 0x3e, 0x85, 0xfc, 0x80, 0xef, 0x52, 0xcd, 0xfc, 0x37, 0x35, - 0x90, 0x3e, 0xd3, 0x45, 0x2c, 0x7c, 0xc7, 0x22, 0xd6, 0xa1, 0xcc, 0x68, 0x10, 0x52, 0x4b, 0x2a, - 0x22, 0x7b, 0x41, 0x4d, 0x81, 0x74, 0x9a, 0x97, 0x54, 0xee, 0x3b, 0x49, 0xea, 0x19, 0xac, 0x24, - 0x29, 0x19, 0x3e, 0x61, 0xc3, 0x58, 0x9b, 0xf7, 0xdf, 0x96, 0xc9, 0xb6, 0x16, 0xfb, 0xe9, 0xdc, - 0x4d, 0xaf, 0xd2, 0xa9, 0x35, 0x6e, 0x02, 0xb8, 0x8c, 0xba, 0x03, 0xc3, 0xa2, 0xa6, 0xa3, 0x16, - 0xcf, 0xa9, 0x52, 0x87, 0x53, 0xe6, 0xaa, 0xe4, 0x4a, 0xd4, 0x74, 0xf0, 0x27, 0x13, 0xa9, 0x2d, - 0x9d, 0xa3, 0x94, 0x43, 0xf9, 0x90, 0xcd, 0xa9, 0xad, 0x0f, 0x55, 0x9f, 0x72, 0xdd, 0x53, 0x2b, - 0xda, 0x59, 0x49, 0x24, 0xb1, 0xfd, 0xd6, 0x9d, 0xe9, 0x91, 0x9b, 0xdc, 0xd8, 0xb2, 0x9f, 0x5e, - 0xe2, 0x0f, 0x20, 0x01, 0x0c, 0x21, 0x2b, 0x10, 0xb7, 0x50, 0x25, 0x06, 0x8f, 0xc8, 0x88, 0x6e, - 0xec, 0x42, 0x75, 0xba, 0x3c, 0x78, 0x1d, 0xf2, 0x41, 0x48, 0xfc, 0x50, 0xa8, 0x30, 0xaf, 0xcb, - 0x05, 0x46, 0x90, 0xa5, 0xcc, 0x12, 0xb7, 0x5c, 0x5e, 0xe7, 0xff, 0x6e, 0x7c, 0x0c, 0xcb, 0x53, - 0x3f, 0x7f, 0x51, 0xc7, 0xda, 0x6f, 0x0a, 0xb0, 0xbe, 0x48, 0x73, 0x0b, 0xe5, 0x7f, 0x09, 0x0a, - 0x6c, 0x3c, 0x3a, 0xa1, 0xbe, 0x9a, 0x15, 0x11, 0xa2, 0x15, 0xae, 0x43, 0xde, 0x21, 0x27, 0xd4, - 0x51, 0x73, 0x9b, 0xca, 0x56, 0x75, 0xe7, 0xde, 0x85, 0x54, 0xbd, 0xdd, 0xe6, 0x2e, 0xba, 0xf4, - 0xc4, 0x9f, 0x41, 
0x2e, 0xba, 0xe2, 0x78, 0x84, 0xbb, 0x17, 0x8b, 0xc0, 0xb5, 0xa8, 0x0b, 0x3f, - 0x7c, 0x05, 0x4a, 0xfc, 0xaf, 0xac, 0x6d, 0x41, 0xe4, 0x5c, 0xe4, 0x00, 0xaf, 0x2b, 0xde, 0x80, - 0xa2, 0x90, 0x99, 0x45, 0xe3, 0xd6, 0x90, 0xac, 0xf9, 0xc1, 0x58, 0x74, 0x40, 0xc6, 0x4e, 0x68, - 0xbc, 0x24, 0xce, 0x98, 0x0a, 0xc1, 0x94, 0xf4, 0x4a, 0x04, 0xfe, 0x9c, 0x63, 0xf8, 0x3a, 0x94, - 0xa5, 0x2a, 0x6d, 0x66, 0xd1, 0xd7, 0xe2, 0xf6, 0xc9, 0xeb, 0x52, 0xa8, 0x2d, 0x8e, 0xf0, 0x9f, - 0x7f, 0x1e, 0xb8, 0x2c, 0x3e, 0x5a, 0xf1, 0x13, 0x1c, 0x10, 0x3f, 0xff, 0xf1, 0xec, 0xc5, 0x77, - 0x6d, 0xf1, 0xf6, 0x66, 0xb5, 0x58, 0xfb, 0x53, 0x06, 0x72, 0xe2, 0x79, 0x5b, 0x81, 0xf2, 0xf1, - 0x17, 0x5d, 0xcd, 0x68, 0x76, 0xfa, 0xfb, 0x6d, 0x0d, 0x29, 0xb8, 0x0a, 0x20, 0x80, 0x27, 0xed, - 0x4e, 0xfd, 0x18, 0x65, 0x92, 0x75, 0xeb, 0xe8, 0xf8, 0xf1, 0x43, 0x94, 0x4d, 0x1c, 0xfa, 0x12, - 0xc8, 0xa5, 0x09, 0x0f, 0x76, 0x50, 0x1e, 0x23, 0xa8, 0xc8, 0x00, 0xad, 0x67, 0x5a, 0xf3, 0xf1, - 0x43, 0x54, 0x98, 0x46, 0x1e, 0xec, 0xa0, 0x25, 0xbc, 0x0c, 0x25, 0x81, 0xec, 0x77, 0x3a, 0x6d, - 0x54, 0x4c, 0x62, 0xf6, 0x8e, 0xf5, 0xd6, 0xd1, 0x01, 0x2a, 0x25, 0x31, 0x0f, 0xf4, 0x4e, 0xbf, - 0x8b, 0x20, 0x89, 0x70, 0xa8, 0xf5, 0x7a, 0xf5, 0x03, 0x0d, 0x95, 0x13, 0xc6, 0xfe, 0x17, 0xc7, - 0x5a, 0x0f, 0x55, 0xa6, 0xd2, 0x7a, 0xb0, 0x83, 0x96, 0x93, 0x9f, 0xd0, 0x8e, 0xfa, 0x87, 0xa8, - 0x8a, 0x57, 0x61, 0x59, 0xfe, 0x44, 0x9c, 0xc4, 0xca, 0x0c, 0xf4, 0xf8, 0x21, 0x42, 0x93, 0x44, - 0x64, 0x94, 0xd5, 0x29, 0xe0, 0xf1, 0x43, 0x84, 0x6b, 0x0d, 0xc8, 0x0b, 0x75, 0x61, 0x0c, 0xd5, - 0x76, 0x7d, 0x5f, 0x6b, 0x1b, 0x9d, 0xee, 0x71, 0xab, 0x73, 0x54, 0x6f, 0x23, 0x65, 0x82, 0xe9, - 0xda, 0xcf, 0xfa, 0x2d, 0x5d, 0x6b, 0xa2, 0x4c, 0x1a, 0xeb, 0x6a, 0xf5, 0x63, 0xad, 0x89, 0xb2, - 0xb5, 0xbb, 0xb0, 0xbe, 0xe8, 0x9e, 0x59, 0xf4, 0x64, 0xd4, 0xbe, 0x56, 0x60, 0x6d, 0xc1, 0x95, - 0xb9, 0xf0, 0x29, 0xfa, 0x29, 0xe4, 0xa5, 0xd2, 0x64, 0x13, 0xb9, 0xb3, 0xf0, 0xee, 0x15, 0xba, - 0x9b, 0x6b, 0x24, 0xc2, 0x2f, 0xdd, 0x48, 0xb3, 0xe7, 0x34, 0x52, 0x1e, 0x62, 0x4e, 0x4e, 0xbf, - 0x52, 0x40, 0x3d, 0x2f, 0xf6, 0x5b, 0x9e, 0xf7, 0xcc, 0xd4, 0xf3, 0xfe, 0xe9, 0x6c, 0x02, 0x37, - 0xce, 0xdf, 0xc3, 0x5c, 0x16, 0xdf, 0x28, 0x70, 0x69, 0xf1, 0xbc, 0xb1, 0x30, 0x87, 0xcf, 0xa0, - 0x30, 0xa2, 0xe1, 0xa9, 0x1b, 0xf7, 0xdc, 0x0f, 0x17, 0xdc, 0xe4, 0xdc, 0x3c, 0x5b, 0xab, 0xc8, - 0x2b, 0xdd, 0x0a, 0xb2, 0xe7, 0x0d, 0x0d, 0x32, 0x9b, 0xb9, 0x4c, 0x7f, 0x9d, 0x81, 0x77, 0x17, - 0x06, 0x5f, 0x98, 0xe8, 0x35, 0x00, 0x9b, 0x79, 0xe3, 0x50, 0xf6, 0x55, 0x79, 0xcd, 0x94, 0x04, - 0x22, 0x1e, 0x61, 0x7e, 0x85, 0x8c, 0xc3, 0xc4, 0x9e, 0x15, 0x76, 0x90, 0x90, 0x20, 0xec, 0x4e, - 0x12, 0xcd, 0x89, 0x44, 0xdf, 0x3f, 0x67, 0xa7, 0x73, 0x2d, 0xeb, 0x23, 0x40, 0xa6, 0x63, 0x53, - 0x16, 0x1a, 0x41, 0xe8, 0x53, 0x32, 0xb2, 0xd9, 0x50, 0xdc, 0xa3, 0xc5, 0xbd, 0xfc, 0x80, 0x38, - 0x01, 0xd5, 0x57, 0xa4, 0xb9, 0x17, 0x5b, 0xb9, 0x87, 0x68, 0x16, 0x7e, 0xca, 0xa3, 0x30, 0xe5, - 0x21, 0xcd, 0x89, 0x47, 0xed, 0x6f, 0x4b, 0x50, 0x4e, 0x4d, 0x67, 0xf8, 0x06, 0x54, 0x9e, 0x93, - 0x97, 0xc4, 0x88, 0x27, 0x6e, 0x59, 0x89, 0x32, 0xc7, 0xba, 0xd1, 0xd4, 0xfd, 0x11, 0xac, 0x0b, - 0x8a, 0x3b, 0x0e, 0xa9, 0x6f, 0x98, 0x0e, 0x09, 0x02, 0x51, 0xb4, 0xa2, 0xa0, 0x62, 0x6e, 0xeb, - 0x70, 0x53, 0x23, 0xb6, 0xe0, 0x47, 0xb0, 0x26, 0x3c, 0x46, 0x63, 0x27, 0xb4, 0x3d, 0x87, 0x1a, - 0xfc, 0x1d, 0x20, 0x10, 0xf7, 0x69, 0x92, 0xd9, 0x2a, 0x67, 0x1c, 0x46, 0x04, 0x9e, 0x51, 0x80, - 0x0f, 0xe0, 0x9a, 0x70, 0x1b, 0x52, 0x46, 0x7d, 0x12, 0x52, 0x83, 0xfe, 0x72, 0x4c, 0x9c, 0xc0, - 0x20, 0xcc, 0x32, 0x4e, 0x49, 0x70, 0xaa, 
0xae, 0xa7, 0x03, 0x5c, 0xe6, 0xdc, 0x83, 0x88, 0xaa, - 0x09, 0x66, 0x9d, 0x59, 0x9f, 0x93, 0xe0, 0x14, 0xef, 0xc1, 0x25, 0x11, 0x28, 0x08, 0x7d, 0x9b, - 0x0d, 0x0d, 0xf3, 0x94, 0x9a, 0x2f, 0x8c, 0x71, 0x38, 0xd8, 0x55, 0xaf, 0xa4, 0x23, 0x88, 0x24, - 0x7b, 0x82, 0xd3, 0xe0, 0x94, 0x7e, 0x38, 0xd8, 0xc5, 0x3d, 0xa8, 0xf0, 0xf3, 0x18, 0xd9, 0x5f, - 0x51, 0x63, 0xe0, 0xfa, 0xa2, 0x47, 0x54, 0x17, 0x3c, 0xdc, 0xa9, 0x22, 0x6e, 0x77, 0x22, 0x87, - 0x43, 0xd7, 0xa2, 0x7b, 0xf9, 0x5e, 0x57, 0xd3, 0x9a, 0x7a, 0x39, 0x8e, 0xf2, 0xc4, 0xf5, 0xb9, - 0xa6, 0x86, 0x6e, 0x52, 0xe3, 0xb2, 0xd4, 0xd4, 0xd0, 0x8d, 0x2b, 0xfc, 0x08, 0xd6, 0x4c, 0x53, - 0x6e, 0xdb, 0x36, 0x8d, 0x68, 0x58, 0x0f, 0x54, 0x34, 0x55, 0x2f, 0xd3, 0x3c, 0x90, 0x84, 0x48, - 0xe6, 0x01, 0xfe, 0x04, 0xde, 0x9d, 0xd4, 0x2b, 0xed, 0xb8, 0x3a, 0xb7, 0xcb, 0x59, 0xd7, 0x47, - 0xb0, 0xe6, 0x9d, 0xcd, 0x3b, 0xe2, 0xa9, 0x5f, 0xf4, 0xce, 0x66, 0xdd, 0x6e, 0x89, 0x17, 0x30, - 0x9f, 0x9a, 0x24, 0xa4, 0x96, 0xfa, 0x5e, 0x9a, 0x9d, 0x32, 0xe0, 0xfb, 0x80, 0x4c, 0xd3, 0xa0, - 0x8c, 0x9c, 0x38, 0xd4, 0x20, 0x3e, 0x65, 0x24, 0x50, 0xaf, 0xa7, 0xc9, 0x55, 0xd3, 0xd4, 0x84, - 0xb5, 0x2e, 0x8c, 0xf8, 0x2e, 0xac, 0xba, 0x27, 0xcf, 0x4d, 0x29, 0x2e, 0xc3, 0xf3, 0xe9, 0xc0, - 0x7e, 0xad, 0xde, 0x14, 0x65, 0x5a, 0xe1, 0x06, 0x21, 0xad, 0xae, 0x80, 0xf1, 0x1d, 0x40, 0x66, - 0x70, 0x4a, 0x7c, 0x4f, 0x34, 0xe9, 0xc0, 0x23, 0x26, 0x55, 0x6f, 0x49, 0xaa, 0xc4, 0x8f, 0x62, - 0x18, 0x6b, 0x70, 0x9d, 0x6f, 0x9e, 0x11, 0xe6, 0x1a, 0xe3, 0x80, 0x1a, 0x93, 0x14, 0x93, 0xb3, - 0xf8, 0x90, 0xa7, 0xa5, 0x5f, 0x8d, 0x69, 0xfd, 0x80, 0x36, 0x13, 0x52, 0x7c, 0x3c, 0xcf, 0x60, - 0x7d, 0xcc, 0x6c, 0x16, 0x52, 0xdf, 0xf3, 0x29, 0x77, 0x96, 0x0f, 0xac, 0xfa, 0xcf, 0xa5, 0x73, - 0x86, 0xee, 0x7e, 0x9a, 0x2d, 0x45, 0xa2, 0xaf, 0x8d, 0xe7, 0xc1, 0xda, 0x1e, 0x54, 0xd2, 0xda, - 0xc1, 0x25, 0x90, 0xea, 0x41, 0x0a, 0xef, 0xa8, 0x8d, 0x4e, 0x93, 0xf7, 0xc2, 0x2f, 0x35, 0x94, - 0xe1, 0x3d, 0xb9, 0xdd, 0x3a, 0xd6, 0x0c, 0xbd, 0x7f, 0x74, 0xdc, 0x3a, 0xd4, 0x50, 0xf6, 0x6e, - 0xa9, 0xf8, 0xaf, 0x25, 0xf4, 0xe6, 0xcd, 0x9b, 0x37, 0x99, 0xda, 0x5f, 0x32, 0x50, 0x9d, 0x9e, - 0x83, 0xf1, 0x4f, 0xe0, 0xbd, 0xf8, 0xa5, 0x35, 0xa0, 0xa1, 0xf1, 0xca, 0xf6, 0x85, 0x9c, 0x47, - 0x44, 0x4e, 0x92, 0xc9, 0x49, 0xac, 0x47, 0xac, 0x1e, 0x0d, 0x7f, 0x61, 0xfb, 0x5c, 0xac, 0x23, - 0x12, 0xe2, 0x36, 0x5c, 0x67, 0xae, 0x11, 0x84, 0x84, 0x59, 0xc4, 0xb7, 0x8c, 0xc9, 0xe7, 0x02, - 0x83, 0x98, 0x26, 0x0d, 0x02, 0x57, 0x76, 0x92, 0x24, 0xca, 0x55, 0xe6, 0xf6, 0x22, 0xf2, 0xe4, - 0x8a, 0xad, 0x47, 0xd4, 0x19, 0xd5, 0x64, 0xcf, 0x53, 0xcd, 0x15, 0x28, 0x8d, 0x88, 0x67, 0x50, - 0x16, 0xfa, 0x67, 0x62, 0x7a, 0x2b, 0xea, 0xc5, 0x11, 0xf1, 0x34, 0xbe, 0xfe, 0xfe, 0xce, 0x20, - 0x5d, 0xc7, 0xbf, 0x67, 0xa1, 0x92, 0x9e, 0xe0, 0xf8, 0x40, 0x6c, 0x8a, 0x6b, 0x5e, 0x11, 0xb7, - 0xc0, 0x07, 0xdf, 0x3a, 0xef, 0x6d, 0x37, 0xf8, 0xfd, 0xbf, 0x57, 0x90, 0x73, 0x95, 0x2e, 0x3d, - 0x79, 0xef, 0xe5, 0x5a, 0xa3, 0x72, 0x5a, 0x2f, 0xea, 0xd1, 0x0a, 0x1f, 0x40, 0xe1, 0x79, 0x20, - 0x62, 0x17, 0x44, 0xec, 0x9b, 0xdf, 0x1e, 0xfb, 0x69, 0x4f, 0x04, 0x2f, 0x3d, 0xed, 0x19, 0x47, - 0x1d, 0xfd, 0xb0, 0xde, 0xd6, 0x23, 0x77, 0x7c, 0x19, 0x72, 0x0e, 0xf9, 0xea, 0x6c, 0xba, 0x53, - 0x08, 0xe8, 0xa2, 0x85, 0xbf, 0x0c, 0xb9, 0x57, 0x94, 0xbc, 0x98, 0xbe, 0x9f, 0x05, 0xf4, 0x3d, - 0x4a, 0xff, 0x3e, 0xe4, 0x45, 0xbd, 0x30, 0x40, 0x54, 0x31, 0xf4, 0x0e, 0x2e, 0x42, 0xae, 0xd1, - 0xd1, 0xb9, 0xfc, 0x11, 0x54, 0x24, 0x6a, 0x74, 0x5b, 0x5a, 0x43, 0x43, 0x99, 0xda, 0x23, 0x28, - 0xc8, 0x22, 0xf0, 0x47, 0x23, 0x29, 0x03, 0x7a, 0x27, 0x5a, 0x46, 
0x31, 0x94, 0xd8, 0xda, 0x3f, - 0xdc, 0xd7, 0x74, 0x94, 0x49, 0x1f, 0xef, 0x9f, 0x15, 0x28, 0xa7, 0x06, 0x2a, 0xde, 0xca, 0x89, - 0xe3, 0xb8, 0xaf, 0x0c, 0xe2, 0xd8, 0x24, 0x88, 0xce, 0x07, 0x04, 0x54, 0xe7, 0xc8, 0x45, 0xeb, - 0xf7, 0x3f, 0xd1, 0xe6, 0x1f, 0x14, 0x40, 0xb3, 0xc3, 0xd8, 0x4c, 0x82, 0xca, 0x0f, 0x9a, 0xe0, - 0xef, 0x15, 0xa8, 0x4e, 0x4f, 0x60, 0x33, 0xe9, 0xdd, 0xf8, 0x41, 0xd3, 0xfb, 0x9d, 0x02, 0xcb, - 0x53, 0x73, 0xd7, 0xff, 0x55, 0x76, 0xbf, 0xcd, 0xc2, 0xda, 0x02, 0x3f, 0x5c, 0x8f, 0x06, 0x54, - 0x39, 0x33, 0xff, 0xf8, 0x22, 0xbf, 0xb5, 0xcd, 0xfb, 0x5f, 0x97, 0xf8, 0x61, 0x34, 0xcf, 0xde, - 0x01, 0x64, 0x5b, 0x94, 0x85, 0xf6, 0xc0, 0xa6, 0x7e, 0xf4, 0x6e, 0x2c, 0xa7, 0xd6, 0x95, 0x09, - 0x2e, 0x5f, 0x8f, 0x7f, 0x04, 0xd8, 0x73, 0x03, 0x3b, 0xb4, 0x5f, 0x52, 0xc3, 0x66, 0xf1, 0x8b, - 0x34, 0x9f, 0x62, 0x73, 0x3a, 0x8a, 0x2d, 0x2d, 0x16, 0x26, 0x6c, 0x46, 0x87, 0x64, 0x86, 0xcd, - 0xaf, 0xa1, 0xac, 0x8e, 0x62, 0x4b, 0xc2, 0xbe, 0x01, 0x15, 0xcb, 0x1d, 0xf3, 0x81, 0x40, 0xf2, - 0xf8, 0xad, 0xa7, 0xe8, 0x65, 0x89, 0x25, 0x94, 0x68, 0x62, 0x9b, 0xbc, 0xc1, 0x57, 0xf4, 0xb2, - 0xc4, 0x24, 0xe5, 0x36, 0xac, 0x90, 0xe1, 0xd0, 0xe7, 0xc1, 0xe3, 0x40, 0x72, 0x0c, 0xad, 0x26, - 0xb0, 0x20, 0x6e, 0x3c, 0x85, 0x62, 0x5c, 0x07, 0xde, 0x58, 0x78, 0x25, 0x0c, 0x4f, 0x7e, 0x47, - 0xc9, 0xf0, 0x97, 0x7a, 0x16, 0x1b, 0x6f, 0x40, 0xc5, 0x0e, 0x8c, 0xc9, 0x07, 0xbd, 0xcc, 0x66, - 0x66, 0xab, 0xa8, 0x97, 0xed, 0x20, 0xf9, 0x82, 0x53, 0xfb, 0x26, 0x03, 0xd5, 0xe9, 0x0f, 0x92, - 0xb8, 0x09, 0x45, 0xc7, 0x35, 0x89, 0x10, 0x82, 0xfc, 0x1a, 0xbe, 0xf5, 0x96, 0x6f, 0x98, 0xdb, - 0xed, 0x88, 0xaf, 0x27, 0x9e, 0x1b, 0x7f, 0x55, 0xa0, 0x18, 0xc3, 0xf8, 0x12, 0xe4, 0x3c, 0x12, - 0x9e, 0x8a, 0x70, 0xf9, 0xfd, 0x0c, 0x52, 0x74, 0xb1, 0xe6, 0x78, 0xe0, 0x11, 0x26, 0x24, 0x10, - 0xe1, 0x7c, 0xcd, 0xcf, 0xd5, 0xa1, 0xc4, 0x12, 0x03, 0xae, 0x3b, 0x1a, 0x51, 0x16, 0x06, 0xf1, - 0xb9, 0x46, 0x78, 0x23, 0x82, 0xf1, 0x3d, 0x58, 0x0d, 0x7d, 0x62, 0x3b, 0x53, 0xdc, 0x9c, 0xe0, - 0xa2, 0xd8, 0x90, 0x90, 0xf7, 0xe0, 0x72, 0x1c, 0xd7, 0xa2, 0x21, 0x31, 0x4f, 0xa9, 0x35, 0x71, - 0x2a, 0x88, 0xaf, 0x5d, 0xef, 0x45, 0x84, 0x66, 0x64, 0x8f, 0x7d, 0xf7, 0x9f, 0xc1, 0x9a, 0xe9, - 0x8e, 0x66, 0x2b, 0xb1, 0x8f, 0x66, 0xde, 0xbb, 0x82, 0xcf, 0x95, 0x2f, 0x61, 0x32, 0x54, 0x7c, - 0x9d, 0xc9, 0x1e, 0x74, 0xf7, 0xff, 0x98, 0xd9, 0x38, 0x90, 0x7e, 0xdd, 0xb8, 0x82, 0x3a, 0x1d, - 0x38, 0xd4, 0xe4, 0xd5, 0xf9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x78, 0x42, 0x69, 0x71, 0xb3, - 0x18, 0x00, 0x00, + // 2379 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x73, 0xdb, 0xc6, + 0x15, 0x37, 0xc1, 0x0f, 0x91, 0x8f, 0x14, 0xb5, 0x5a, 0x29, 0x36, 0x2c, 0xc7, 0xb1, 0xcc, 0xd8, + 0xb5, 0x6c, 0xb7, 0x74, 0x46, 0xfe, 0x88, 0xa3, 0x74, 0xd2, 0xa1, 0x48, 0x58, 0xa1, 0x4b, 0x91, + 0x2c, 0x48, 0x35, 0x76, 0x2e, 0x98, 0x15, 0xb0, 0xa4, 0x60, 0x83, 0x00, 0x02, 0x80, 0xb6, 0x95, + 0x93, 0x67, 0x7a, 0xea, 0x7f, 0xd0, 0xe9, 0x74, 0x7a, 0xc8, 0x25, 0x33, 0xed, 0xbd, 0x87, 0xde, + 0x7b, 0xed, 0x4c, 0xef, 0x3d, 0xf4, 0xd0, 0x99, 0xf6, 0x3f, 0xe8, 0xb5, 0xb3, 0xbb, 0x00, 0x08, + 0x7e, 0xd9, 0x4a, 0x66, 0x9c, 0x9c, 0xa4, 0xfd, 0xbd, 0xdf, 0x7b, 0x78, 0xfb, 0xf6, 0x87, 0xdd, + 0x87, 0x25, 0x20, 0x83, 0xfa, 0xba, 0x67, 0xba, 0x81, 0xe3, 0x55, 0x5d, 0xcf, 0x09, 0x1c, 0xbc, + 0x36, 0x74, 0x9c, 0xa1, 0x45, 0xc5, 0xe8, 0x78, 0x3c, 0xa8, 0x1c, 0xc2, 0xfa, 0x23, 0xd3, 0xa2, + 0x8d, 0x98, 0xd8, 0xa3, 0x01, 0x7e, 0x08, 0x99, 0x81, 0x69, 0x51, 0x39, 0xb5, 0x9d, 0xde, 0x29, + 0xee, 0x5e, 0xab, 0xce, 
0x38, 0x55, 0xa7, 0x3d, 0xba, 0x0c, 0x56, 0xb9, 0x47, 0xe5, 0xdf, 0x19, + 0xd8, 0x58, 0x60, 0xc5, 0x18, 0x32, 0x36, 0x19, 0xb1, 0x88, 0xa9, 0x9d, 0x82, 0xca, 0xff, 0xc7, + 0x32, 0xac, 0xb8, 0x44, 0x7f, 0x4e, 0x86, 0x54, 0x96, 0x38, 0x1c, 0x0d, 0xf1, 0x07, 0x00, 0x06, + 0x75, 0xa9, 0x6d, 0x50, 0x5b, 0x3f, 0x95, 0xd3, 0xdb, 0xe9, 0x9d, 0x82, 0x9a, 0x40, 0xf0, 0x6d, + 0x58, 0x77, 0xc7, 0xc7, 0x96, 0xa9, 0x6b, 0x09, 0x1a, 0x6c, 0xa7, 0x77, 0xb2, 0x2a, 0x12, 0x86, + 0xc6, 0x84, 0x7c, 0x03, 0xd6, 0x5e, 0x52, 0xf2, 0x3c, 0x49, 0x2d, 0x72, 0x6a, 0x99, 0xc1, 0x09, + 0x62, 0x1d, 0x4a, 0x23, 0xea, 0xfb, 0x64, 0x48, 0xb5, 0xe0, 0xd4, 0xa5, 0x72, 0x86, 0xcf, 0x7e, + 0x7b, 0x6e, 0xf6, 0xb3, 0x33, 0x2f, 0x86, 0x5e, 0xfd, 0x53, 0x97, 0xe2, 0x1a, 0x14, 0xa8, 0x3d, + 0x1e, 0x89, 0x08, 0xd9, 0x25, 0xf5, 0x53, 0xec, 0xf1, 0x68, 0x36, 0x4a, 0x9e, 0xb9, 0x85, 0x21, + 0x56, 0x7c, 0xea, 0xbd, 0x30, 0x75, 0x2a, 0xe7, 0x78, 0x80, 0x1b, 0x73, 0x01, 0x7a, 0xc2, 0x3e, + 0x1b, 0x23, 0xf2, 0xc3, 0x75, 0x28, 0xd0, 0x57, 0x01, 0xb5, 0x7d, 0xd3, 0xb1, 0xe5, 0x15, 0x1e, + 0xe4, 0xfa, 0x82, 0x55, 0xa4, 0x96, 0x31, 0x1b, 0x62, 0xe2, 0x87, 0x1f, 0xc0, 0x8a, 0xe3, 0x06, + 0xa6, 0x63, 0xfb, 0x72, 0x7e, 0x3b, 0xb5, 0x53, 0xdc, 0x7d, 0x7f, 0xa1, 0x10, 0x3a, 0x82, 0xa3, + 0x46, 0x64, 0xdc, 0x04, 0xe4, 0x3b, 0x63, 0x4f, 0xa7, 0x9a, 0xee, 0x18, 0x54, 0x33, 0xed, 0x81, + 0x23, 0x17, 0x78, 0x80, 0x2b, 0xf3, 0x13, 0xe1, 0xc4, 0xba, 0x63, 0xd0, 0xa6, 0x3d, 0x70, 0xd4, + 0xb2, 0x3f, 0x35, 0xc6, 0xe7, 0x21, 0xe7, 0x9f, 0xda, 0x01, 0x79, 0x25, 0x97, 0xb8, 0x42, 0xc2, + 0x51, 0xe5, 0x7f, 0x59, 0x58, 0x3b, 0x8b, 0xc4, 0x3e, 0x85, 0xec, 0x80, 0xcd, 0x52, 0x96, 0xbe, + 0x4b, 0x0d, 0x84, 0xcf, 0x74, 0x11, 0x73, 0xdf, 0xb3, 0x88, 0x35, 0x28, 0xda, 0xd4, 0x0f, 0xa8, + 0x21, 0x14, 0x91, 0x3e, 0xa3, 0xa6, 0x40, 0x38, 0xcd, 0x4b, 0x2a, 0xf3, 0xbd, 0x24, 0xf5, 0x04, + 0xd6, 0xe2, 0x94, 0x34, 0x8f, 0xd8, 0xc3, 0x48, 0x9b, 0x77, 0xde, 0x96, 0x49, 0x55, 0x89, 0xfc, + 0x54, 0xe6, 0xa6, 0x96, 0xe9, 0xd4, 0x18, 0x37, 0x00, 0x1c, 0x9b, 0x3a, 0x03, 0xcd, 0xa0, 0xba, + 0x25, 0xe7, 0x97, 0x54, 0xa9, 0xc3, 0x28, 0x73, 0x55, 0x72, 0x04, 0xaa, 0x5b, 0xf8, 0x93, 0x89, + 0xd4, 0x56, 0x96, 0x28, 0xe5, 0x50, 0xbc, 0x64, 0x73, 0x6a, 0x3b, 0x82, 0xb2, 0x47, 0x99, 0xee, + 0xa9, 0x11, 0xce, 0xac, 0xc0, 0x93, 0xa8, 0xbe, 0x75, 0x66, 0x6a, 0xe8, 0x26, 0x26, 0xb6, 0xea, + 0x25, 0x87, 0xf8, 0x43, 0x88, 0x01, 0x8d, 0xcb, 0x0a, 0xf8, 0x2e, 0x54, 0x8a, 0xc0, 0x36, 0x19, + 0xd1, 0xad, 0x87, 0x50, 0x9e, 0x2e, 0x0f, 0xde, 0x84, 0xac, 0x1f, 0x10, 0x2f, 0xe0, 0x2a, 0xcc, + 0xaa, 0x62, 0x80, 0x11, 0xa4, 0xa9, 0x6d, 0xf0, 0x5d, 0x2e, 0xab, 0xb2, 0x7f, 0xb7, 0x3e, 0x86, + 0xd5, 0xa9, 0xc7, 0x9f, 0xd5, 0xb1, 0xf2, 0xbb, 0x1c, 0x6c, 0x2e, 0xd2, 0xdc, 0x42, 0xf9, 0x9f, + 0x87, 0x9c, 0x3d, 0x1e, 0x1d, 0x53, 0x4f, 0x4e, 0xf3, 0x08, 0xe1, 0x08, 0xd7, 0x20, 0x6b, 0x91, + 0x63, 0x6a, 0xc9, 0x99, 0xed, 0xd4, 0x4e, 0x79, 0xf7, 0xf6, 0x99, 0x54, 0x5d, 0x6d, 0x31, 0x17, + 0x55, 0x78, 0xe2, 0xcf, 0x20, 0x13, 0x6e, 0x71, 0x2c, 0xc2, 0xad, 0xb3, 0x45, 0x60, 0x5a, 0x54, + 0xb9, 0x1f, 0xbe, 0x04, 0x05, 0xf6, 0x57, 0xd4, 0x36, 0xc7, 0x73, 0xce, 0x33, 0x80, 0xd5, 0x15, + 0x6f, 0x41, 0x9e, 0xcb, 0xcc, 0xa0, 0xd1, 0xd1, 0x10, 0x8f, 0xd9, 0xc2, 0x18, 0x74, 0x40, 0xc6, + 0x56, 0xa0, 0xbd, 0x20, 0xd6, 0x98, 0x72, 0xc1, 0x14, 0xd4, 0x52, 0x08, 0xfe, 0x9a, 0x61, 0xf8, + 0x0a, 0x14, 0x85, 0x2a, 0x4d, 0xdb, 0xa0, 0xaf, 0xf8, 0xee, 0x93, 0x55, 0x85, 0x50, 0x9b, 0x0c, + 0x61, 0x8f, 0x7f, 0xe6, 0x3b, 0x76, 0xb4, 0xb4, 0xfc, 0x11, 0x0c, 0xe0, 0x8f, 0xff, 0x78, 0x76, + 0xe3, 0xbb, 0xbc, 0x78, 0x7a, 0xb3, 0x5a, 0xac, 
0xfc, 0x45, 0x82, 0x0c, 0x7f, 0xdf, 0xd6, 0xa0, + 0xd8, 0x7f, 0xda, 0x55, 0xb4, 0x46, 0xe7, 0x68, 0xbf, 0xa5, 0xa0, 0x14, 0x2e, 0x03, 0x70, 0xe0, + 0x51, 0xab, 0x53, 0xeb, 0x23, 0x29, 0x1e, 0x37, 0xdb, 0xfd, 0x07, 0xf7, 0x50, 0x3a, 0x76, 0x38, + 0x12, 0x40, 0x26, 0x49, 0xb8, 0xbb, 0x8b, 0xb2, 0x18, 0x41, 0x49, 0x04, 0x68, 0x3e, 0x51, 0x1a, + 0x0f, 0xee, 0xa1, 0xdc, 0x34, 0x72, 0x77, 0x17, 0xad, 0xe0, 0x55, 0x28, 0x70, 0x64, 0xbf, 0xd3, + 0x69, 0xa1, 0x7c, 0x1c, 0xb3, 0xd7, 0x57, 0x9b, 0xed, 0x03, 0x54, 0x88, 0x63, 0x1e, 0xa8, 0x9d, + 0xa3, 0x2e, 0x82, 0x38, 0xc2, 0xa1, 0xd2, 0xeb, 0xd5, 0x0e, 0x14, 0x54, 0x8c, 0x19, 0xfb, 0x4f, + 0xfb, 0x4a, 0x0f, 0x95, 0xa6, 0xd2, 0xba, 0xbb, 0x8b, 0x56, 0xe3, 0x47, 0x28, 0xed, 0xa3, 0x43, + 0x54, 0xc6, 0xeb, 0xb0, 0x2a, 0x1e, 0x11, 0x25, 0xb1, 0x36, 0x03, 0x3d, 0xb8, 0x87, 0xd0, 0x24, + 0x11, 0x11, 0x65, 0x7d, 0x0a, 0x78, 0x70, 0x0f, 0xe1, 0x4a, 0x1d, 0xb2, 0x5c, 0x5d, 0x18, 0x43, + 0xb9, 0x55, 0xdb, 0x57, 0x5a, 0x5a, 0xa7, 0xdb, 0x6f, 0x76, 0xda, 0xb5, 0x16, 0x4a, 0x4d, 0x30, + 0x55, 0xf9, 0xd5, 0x51, 0x53, 0x55, 0x1a, 0x48, 0x4a, 0x62, 0x5d, 0xa5, 0xd6, 0x57, 0x1a, 0x28, + 0x5d, 0xd1, 0x61, 0x73, 0xd1, 0x3e, 0xb3, 0xf0, 0xcd, 0x48, 0x2c, 0xb1, 0xb4, 0x64, 0x89, 0x79, + 0xac, 0xb9, 0x25, 0xfe, 0x26, 0x05, 0x1b, 0x0b, 0xf6, 0xda, 0x85, 0x0f, 0xf9, 0x05, 0x64, 0x85, + 0x44, 0xc5, 0xe9, 0x73, 0x73, 0xe1, 0xa6, 0xcd, 0x05, 0x3b, 0x77, 0x02, 0x71, 0xbf, 0xe4, 0x09, + 0x9c, 0x5e, 0x72, 0x02, 0xb3, 0x10, 0x73, 0x49, 0xfe, 0x26, 0x05, 0xf2, 0xb2, 0xd8, 0x6f, 0xd9, + 0x28, 0xa4, 0xa9, 0x8d, 0xe2, 0xd3, 0xd9, 0x04, 0xae, 0x2e, 0x9f, 0xc3, 0x5c, 0x16, 0xdf, 0xa6, + 0xe0, 0xfc, 0xe2, 0x46, 0x65, 0x61, 0x0e, 0x9f, 0x41, 0x6e, 0x44, 0x83, 0x13, 0x27, 0x3a, 0xac, + 0x7f, 0xb2, 0xe0, 0x08, 0x60, 0xe6, 0xd9, 0x5a, 0x85, 0x5e, 0xc9, 0x33, 0x24, 0xbd, 0xac, 0xdb, + 0x10, 0xd9, 0xcc, 0x65, 0xfa, 0x5b, 0x09, 0xde, 0x5b, 0x18, 0x7c, 0x61, 0xa2, 0x97, 0x01, 0x4c, + 0xdb, 0x1d, 0x07, 0xe2, 0x40, 0x16, 0xfb, 0x53, 0x81, 0x23, 0xfc, 0xdd, 0x67, 0x7b, 0xcf, 0x38, + 0x88, 0xed, 0x69, 0x6e, 0x07, 0x01, 0x71, 0xc2, 0xc3, 0x49, 0xa2, 0x19, 0x9e, 0xe8, 0x07, 0x4b, + 0x66, 0x3a, 0x77, 0xd6, 0x7d, 0x04, 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x8c, + 0x4c, 0x7b, 0xc8, 0x37, 0xe0, 0xfc, 0x5e, 0x76, 0x40, 0x2c, 0x9f, 0xaa, 0x6b, 0xc2, 0xdc, 0x8b, + 0xac, 0xcc, 0x83, 0x9f, 0x32, 0x5e, 0xc2, 0x23, 0x37, 0xe5, 0x21, 0xcc, 0xb1, 0x47, 0xe5, 0xcf, + 0x2b, 0x50, 0x4c, 0xb4, 0x75, 0xf8, 0x2a, 0x94, 0x9e, 0x91, 0x17, 0x44, 0x8b, 0x5a, 0x75, 0x51, + 0x89, 0x22, 0xc3, 0xba, 0x61, 0xbb, 0xfe, 0x11, 0x6c, 0x72, 0x8a, 0x33, 0x0e, 0xa8, 0xa7, 0xe9, + 0x16, 0xf1, 0x7d, 0x5e, 0xb4, 0x3c, 0xa7, 0x62, 0x66, 0xeb, 0x30, 0x53, 0x3d, 0xb2, 0xe0, 0xfb, + 0xb0, 0xc1, 0x3d, 0x46, 0x63, 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xe3, 0xc1, 0xe7, 0x1b, 0x71, + 0x9c, 0xd9, 0x3a, 0x63, 0x1c, 0x86, 0x04, 0x96, 0x91, 0x8f, 0x1b, 0x70, 0x99, 0xbb, 0x0d, 0xa9, + 0x4d, 0x3d, 0x12, 0x50, 0x8d, 0x7e, 0x35, 0x26, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0x4e, 0x88, 0x7f, + 0x22, 0x6f, 0xb2, 0x00, 0xfb, 0x92, 0x9c, 0x52, 0x2f, 0x32, 0xe2, 0x41, 0xc8, 0x53, 0x38, 0xad, + 0x66, 0x1b, 0x9f, 0x13, 0xff, 0x04, 0xef, 0xc1, 0x79, 0x1e, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x6a, + 0xfa, 0x09, 0xd5, 0x9f, 0x6b, 0xe3, 0x60, 0xf0, 0x50, 0xbe, 0x94, 0x7c, 0x3e, 0xcf, 0xb0, 0xc7, + 0x39, 0x75, 0x46, 0x39, 0x0a, 0x06, 0x0f, 0x71, 0x0f, 0x4a, 0x6c, 0x31, 0x46, 0xe6, 0xd7, 0x54, + 0x1b, 0x38, 0x1e, 0x3f, 0x59, 0xca, 0x0b, 0xde, 0xec, 0x44, 0x05, 0xab, 0x9d, 0xd0, 0xe1, 0xd0, + 0x31, 0xe8, 0x5e, 0xb6, 0xd7, 0x55, 0x94, 0x86, 0x5a, 0x8c, 0xa2, 0x3c, 
0x72, 0x3c, 0x26, 0xa8, + 0xa1, 0x13, 0x17, 0xb8, 0x28, 0x04, 0x35, 0x74, 0xa2, 0xf2, 0xde, 0x87, 0x0d, 0x5d, 0x17, 0x73, + 0x36, 0x75, 0x2d, 0x6c, 0xf1, 0x7d, 0x19, 0x4d, 0x15, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0xd4, 0xb8, + 0x8f, 0x3f, 0x81, 0xf7, 0x26, 0xc5, 0x4a, 0x3a, 0xae, 0xcf, 0xcd, 0x72, 0xd6, 0xf5, 0x3e, 0x6c, + 0xb8, 0xa7, 0xf3, 0x8e, 0x78, 0xea, 0x89, 0xee, 0xe9, 0xac, 0xdb, 0x75, 0xfe, 0xd9, 0xe6, 0x51, + 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x24, 0xd9, 0x09, 0x03, 0xbe, 0x03, 0x48, 0xd7, 0x35, 0x6a, 0x93, + 0x63, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1, 0xe5, 0x2b, 0x49, 0x72, 0x59, 0xd7, 0x15, 0x6e, 0xad, + 0x71, 0x23, 0xbe, 0x05, 0xeb, 0xce, 0xf1, 0x33, 0x5d, 0x28, 0x4b, 0x73, 0x3d, 0x3a, 0x30, 0x5f, + 0xc9, 0xd7, 0x78, 0x99, 0xd6, 0x98, 0x81, 0xeb, 0xaa, 0xcb, 0x61, 0x7c, 0x13, 0x90, 0xee, 0x9f, + 0x10, 0xcf, 0xe5, 0x47, 0xbb, 0xef, 0x12, 0x9d, 0xca, 0xd7, 0x05, 0x55, 0xe0, 0xed, 0x08, 0x66, + 0xca, 0xf6, 0x5f, 0x9a, 0x83, 0x20, 0x8a, 0x78, 0x43, 0x28, 0x9b, 0x63, 0x61, 0xb4, 0x27, 0xb0, + 0x39, 0xb6, 0x4d, 0x3b, 0xa0, 0x9e, 0xeb, 0x51, 0xd6, 0xc4, 0x8b, 0x37, 0x51, 0xfe, 0xcf, 0xca, + 0x92, 0x36, 0xfc, 0x28, 0xc9, 0x16, 0x02, 0x50, 0x37, 0xc6, 0xf3, 0x60, 0x65, 0x0f, 0x4a, 0x49, + 0x5d, 0xe0, 0x02, 0x08, 0x65, 0xa0, 0x14, 0x3b, 0x63, 0xeb, 0x9d, 0x06, 0x3b, 0x1d, 0xbf, 0x54, + 0x90, 0xc4, 0x4e, 0xe9, 0x56, 0xb3, 0xaf, 0x68, 0xea, 0x51, 0xbb, 0xdf, 0x3c, 0x54, 0x50, 0xfa, + 0x56, 0x21, 0xff, 0xdf, 0x15, 0xf4, 0xfa, 0xf5, 0xeb, 0xd7, 0x52, 0xe5, 0x6f, 0x12, 0x94, 0xa7, + 0x3b, 0x63, 0xfc, 0x73, 0xb8, 0x10, 0x7d, 0xc6, 0xfa, 0x34, 0xd0, 0x5e, 0x9a, 0x1e, 0x97, 0xea, + 0x88, 0x88, 0xde, 0x32, 0xae, 0xf2, 0x66, 0xc8, 0xea, 0xd1, 0xe0, 0x0b, 0xd3, 0x63, 0x42, 0x1c, + 0x91, 0x00, 0xb7, 0xe0, 0x8a, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0xb9, 0x40, + 0xd0, 0x88, 0xae, 0x53, 0xdf, 0x77, 0xc4, 0x11, 0x11, 0x47, 0x79, 0xdf, 0x76, 0x7a, 0x21, 0x79, + 0xb2, 0x77, 0xd6, 0x42, 0xea, 0x8c, 0x22, 0xd2, 0xcb, 0x14, 0x71, 0x09, 0x0a, 0x23, 0xe2, 0x6a, + 0xd4, 0x0e, 0xbc, 0x53, 0xde, 0xcf, 0xe5, 0xd5, 0xfc, 0x88, 0xb8, 0x0a, 0x1b, 0xbf, 0xbb, 0x35, + 0x48, 0xd6, 0xf1, 0x9f, 0x69, 0x28, 0x25, 0x7b, 0x3a, 0xd6, 0x22, 0xeb, 0x7c, 0xff, 0x4e, 0xf1, + 0x37, 0xfc, 0xc3, 0x37, 0x76, 0x80, 0xd5, 0x3a, 0xdb, 0xd8, 0xf7, 0x72, 0xa2, 0xd3, 0x52, 0x85, + 0x27, 0x3b, 0x54, 0xd9, 0x3b, 0x4d, 0x45, 0xff, 0x9e, 0x57, 0xc3, 0x11, 0x3e, 0x80, 0xdc, 0x33, + 0x9f, 0xc7, 0xce, 0xf1, 0xd8, 0xd7, 0xde, 0x1c, 0xfb, 0x71, 0x8f, 0x07, 0x2f, 0x3c, 0xee, 0x69, + 0xed, 0x8e, 0x7a, 0x58, 0x6b, 0xa9, 0xa1, 0x3b, 0xbe, 0x08, 0x19, 0x8b, 0x7c, 0x7d, 0x3a, 0x7d, + 0x04, 0x70, 0xe8, 0xac, 0x85, 0xbf, 0x08, 0x99, 0x97, 0x94, 0x3c, 0x9f, 0xde, 0x78, 0x39, 0xf4, + 0x0e, 0xa5, 0x7f, 0x07, 0xb2, 0xbc, 0x5e, 0x18, 0x20, 0xac, 0x18, 0x3a, 0x87, 0xf3, 0x90, 0xa9, + 0x77, 0x54, 0x26, 0x7f, 0x04, 0x25, 0x81, 0x6a, 0xdd, 0xa6, 0x52, 0x57, 0x90, 0x54, 0xb9, 0x0f, + 0x39, 0x51, 0x04, 0xf6, 0x6a, 0xc4, 0x65, 0x40, 0xe7, 0xc2, 0x61, 0x18, 0x23, 0x15, 0x59, 0x8f, + 0x0e, 0xf7, 0x15, 0x15, 0x49, 0xc9, 0xe5, 0xf5, 0xa1, 0x94, 0x6c, 0xe7, 0x7e, 0x18, 0x4d, 0xfd, + 0x35, 0x05, 0xc5, 0x44, 0x7b, 0xc6, 0x1a, 0x03, 0x62, 0x59, 0xce, 0x4b, 0x8d, 0x58, 0x26, 0xf1, + 0x43, 0x51, 0x00, 0x87, 0x6a, 0x0c, 0x39, 0xeb, 0xa2, 0xfd, 0x20, 0xc9, 0xff, 0x31, 0x05, 0x68, + 0xb6, 0xb5, 0x9b, 0x49, 0x30, 0xf5, 0xa3, 0x26, 0xf8, 0x87, 0x14, 0x94, 0xa7, 0xfb, 0xb9, 0x99, + 0xf4, 0xae, 0xfe, 0xa8, 0xe9, 0xfd, 0x4b, 0x82, 0xd5, 0xa9, 0x2e, 0xee, 0xac, 0xd9, 0x7d, 0x05, + 0xeb, 0xa6, 0x41, 0x47, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x82, 0x5a, 0x72, 0x85, + 
0x6f, 0x14, 0x77, 0xde, 0xdc, 0x27, 0x56, 0x9b, 0x13, 0xbf, 0x16, 0x73, 0xdb, 0xdb, 0x68, 0x36, + 0x94, 0xc3, 0x6e, 0xa7, 0xaf, 0xb4, 0xeb, 0x4f, 0xb5, 0xa3, 0xf6, 0x2f, 0xdb, 0x9d, 0x2f, 0xda, + 0x2a, 0x32, 0x67, 0x68, 0xef, 0xf0, 0x55, 0xef, 0x02, 0x9a, 0x4d, 0x0a, 0x5f, 0x80, 0x45, 0x69, + 0xa1, 0x73, 0x78, 0x03, 0xd6, 0xda, 0x1d, 0xad, 0xd7, 0x6c, 0x28, 0x9a, 0xf2, 0xe8, 0x91, 0x52, + 0xef, 0xf7, 0xc4, 0x87, 0x73, 0xcc, 0xee, 0x4f, 0xbf, 0xd4, 0xbf, 0x4f, 0xc3, 0xc6, 0x82, 0x4c, + 0x70, 0x2d, 0xec, 0xd9, 0xc5, 0x67, 0xc4, 0xcf, 0xce, 0x92, 0x7d, 0x95, 0x75, 0x05, 0x5d, 0xe2, + 0x05, 0x61, 0x8b, 0x7f, 0x13, 0x58, 0x95, 0xec, 0xc0, 0x1c, 0x98, 0xd4, 0x0b, 0xef, 0x19, 0x44, + 0x23, 0xbf, 0x36, 0xc1, 0xc5, 0x55, 0xc3, 0x4f, 0x01, 0xbb, 0x8e, 0x6f, 0x06, 0xe6, 0x0b, 0xaa, + 0x99, 0x76, 0x74, 0x29, 0xc1, 0x1a, 0xfb, 0x8c, 0x8a, 0x22, 0x4b, 0xd3, 0x0e, 0x62, 0xb6, 0x4d, + 0x87, 0x64, 0x86, 0xcd, 0x36, 0xf0, 0xb4, 0x8a, 0x22, 0x4b, 0xcc, 0xbe, 0x0a, 0x25, 0xc3, 0x19, + 0xb3, 0x36, 0x49, 0xf0, 0xd8, 0x79, 0x91, 0x52, 0x8b, 0x02, 0x8b, 0x29, 0x61, 0x1f, 0x3b, 0xb9, + 0x0d, 0x29, 0xa9, 0x45, 0x81, 0x09, 0xca, 0x0d, 0x58, 0x23, 0xc3, 0xa1, 0xc7, 0x82, 0x47, 0x81, + 0x44, 0x67, 0x5e, 0x8e, 0x61, 0x4e, 0xdc, 0x7a, 0x0c, 0xf9, 0xa8, 0x0e, 0xec, 0x48, 0x66, 0x95, + 0xd0, 0x5c, 0x71, 0x27, 0x25, 0xed, 0x14, 0xd4, 0xbc, 0x1d, 0x19, 0xaf, 0x42, 0xc9, 0xf4, 0xb5, + 0xc9, 0xe5, 0xa8, 0xb4, 0x2d, 0xed, 0xe4, 0xd5, 0xa2, 0xe9, 0xc7, 0xb7, 0x61, 0x95, 0x6f, 0x25, + 0x28, 0x4f, 0x5f, 0xee, 0xe2, 0x06, 0xe4, 0x2d, 0x47, 0x27, 0x5c, 0x5a, 0xe2, 0x97, 0x85, 0x9d, + 0xb7, 0xdc, 0x07, 0x57, 0x5b, 0x21, 0x5f, 0x8d, 0x3d, 0xb7, 0xfe, 0x9e, 0x82, 0x7c, 0x04, 0xe3, + 0xf3, 0x90, 0x71, 0x49, 0x70, 0xc2, 0xc3, 0x65, 0xf7, 0x25, 0x94, 0x52, 0xf9, 0x98, 0xe1, 0xbe, + 0x4b, 0x6c, 0x2e, 0x81, 0x10, 0x67, 0x63, 0xb6, 0xae, 0x16, 0x25, 0x06, 0x6f, 0xfb, 0x9d, 0xd1, + 0x88, 0xda, 0x81, 0x1f, 0xad, 0x6b, 0x88, 0xd7, 0x43, 0x18, 0xdf, 0x86, 0xf5, 0xc0, 0x23, 0xa6, + 0x35, 0xc5, 0xcd, 0x70, 0x2e, 0x8a, 0x0c, 0x31, 0x79, 0x0f, 0x2e, 0x46, 0x71, 0x0d, 0x1a, 0x10, + 0xfd, 0x84, 0x1a, 0x13, 0xa7, 0x1c, 0xbf, 0x39, 0xbc, 0x10, 0x12, 0x1a, 0xa1, 0x3d, 0xf2, 0xad, + 0xfc, 0x23, 0x05, 0xeb, 0xd1, 0x87, 0x8a, 0x11, 0x17, 0xeb, 0x10, 0x80, 0xd8, 0xb6, 0x13, 0x24, + 0xcb, 0x35, 0x2f, 0xe5, 0x39, 0xbf, 0x6a, 0x2d, 0x76, 0x52, 0x13, 0x01, 0xb6, 0x46, 0x00, 0x13, + 0xcb, 0xd2, 0xb2, 0x5d, 0x81, 0x62, 0x78, 0x73, 0xcf, 0x7f, 0xfe, 0x11, 0x9f, 0xb6, 0x20, 0x20, + 0xf6, 0x45, 0x83, 0x37, 0x21, 0x7b, 0x4c, 0x87, 0xa6, 0x1d, 0xde, 0x27, 0x8a, 0x41, 0x74, 0x4b, + 0x99, 0x89, 0x6f, 0x29, 0xf7, 0x9f, 0xc0, 0x86, 0xee, 0x8c, 0x66, 0xd3, 0xdd, 0x47, 0x33, 0x9f, + 0xd7, 0xfe, 0xe7, 0xa9, 0x2f, 0x61, 0xd2, 0x62, 0x7e, 0x23, 0xa5, 0x0f, 0xba, 0xfb, 0x7f, 0x92, + 0xb6, 0x0e, 0x84, 0x5f, 0x37, 0x9a, 0xa6, 0x4a, 0x07, 0x16, 0xd5, 0x59, 0xea, 0xff, 0x0f, 0x00, + 0x00, 0xff, 0xff, 0xa0, 0xbf, 0x63, 0x15, 0xd3, 0x1a, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go index b175f555..bac9913e 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -21,12 +21,14 @@ It has these top-level messages: FileOptions MessageOptions FieldOptions + OneofOptions EnumOptions EnumValueOptions ServiceOptions MethodOptions UninterpretedOption SourceCodeInfo + GeneratedCodeInfo */ package 
descriptor @@ -231,11 +233,14 @@ func (this *OneofDescriptorProto) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 6) s = append(s, "&descriptor.OneofDescriptorProto{") if this.Name != nil { s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -383,8 +388,8 @@ func (this *FileOptions) GoString() string { if this.CsharpNamespace != nil { s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") } - if this.JavananoUseDeprecatedPackage != nil { - s = append(s, "JavananoUseDeprecatedPackage: "+valueToGoStringDescriptor(this.JavananoUseDeprecatedPackage, "bool")+",\n") + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") } if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") @@ -458,6 +463,22 @@ func (this *FieldOptions) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *EnumOptions) GoString() string { if this == nil { return "nil" @@ -522,11 +543,14 @@ func (this *MethodOptions) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&descriptor.MethodOptions{") if this.Deprecated != nil { s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "descriptor.MethodOptions_IdempotencyLevel")+",\n") + } if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } @@ -630,6 +654,45 @@ func (this *SourceCodeInfo_Location) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + 
s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func valueToGoStringDescriptor(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go index 861f4d02..e0846a35 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -99,6 +99,17 @@ func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { return x } +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + func (field *FieldDescriptorProto) GetKey() []byte { x := field.GetKeyUint64() i := 0 @@ -111,6 +122,18 @@ func (field *FieldDescriptorProto) GetKey() []byte { return keybuf } +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { msg := desc.GetMessage(packageName, messageName) if msg == nil { @@ -352,6 +375,16 @@ func (f *FieldDescriptorProto) IsPacked() bool { return f.Options != nil && f.GetOptions().GetPacked() } +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + func (m *DescriptorProto) HasExtension() bool { return len(m.ExtensionRange) > 0 } diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md index 795f53f6..037fc7c8 100644 --- a/vendor/github.com/golang/protobuf/README.md +++ b/vendor/github.com/golang/protobuf/README.md @@ -1,7 +1,5 @@ # Go support for Protocol Buffers -[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf) - Google's data interchange format. Copyright 2010 The Go Authors. https://github.com/golang/protobuf @@ -24,7 +22,7 @@ To use this software, you must: for details or, if you are using gccgo, follow the instructions at https://golang.org/doc/install/gccgo - Grab the code from the repository and install the proto package. - The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`. + The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`. The compiler plugin, protoc-gen-go, will be installed in $GOBIN, defaulting to $GOPATH/bin. It must be in your $PATH for the protocol compiler, protoc, to find it. @@ -106,6 +104,7 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. 
+ - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. Consider file test.proto, containing diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 8b84d1b2..68b9b30c 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int { // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -1075,17 +1075,10 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error { func (o *Buffer) enc_exts(p *Properties, base structPointer) error { exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { + if err := encodeExtensions(exts); err != nil { return err } + v, _ := exts.extensionsRead() return o.enc_map_body(v) } diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index eaad2183..6b9b3637 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -154,7 +154,6 @@ type ExtensionDesc struct { Field int32 // field number Name string // fully-qualified name of extension, for text formatting Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined } func (ed *ExtensionDesc) repeated() bool { diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 1c225504..ac4ddbc0 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -73,6 +73,7 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. The simplest way to describe this is to see an example. diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 5e14513f..61f83c1e 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -865,7 +865,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 00000000..89e07ae1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,136 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. 
All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. 
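+// A minimal sketch with hypothetical values (it assumes the well-known
+// Duration type from github.com/golang/protobuf/ptypes/duration is
+// linked into the binary):
+//
+//	a, _ := ptypes.MarshalAny(&duration.Duration{Seconds: 1})
+//	msg, err := ptypes.Empty(a) // msg is a fresh *duration.Duration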
+func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 00000000..f2c6906b --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,155 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/any/any.proto +// DO NOT EDIT! + +/* +Package any is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/any/any.proto + +It has these top-level messages: + Any +*/ +package any + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... 
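+//
+//  Example 4: Pack and unpack a message in Go, sketched with the
+//  MarshalAny and UnmarshalAny helpers that the ptypes package in this
+//  repository provides (pb.Foo stands in for any generated message):
+//
+//      foo := &pb.Foo{...}
+//      any, err := ptypes.MarshalAny(foo)
+//      ...
+//      foo := &pb.Foo{}
+//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//        ...
+//      }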
+// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, + 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, + 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, + 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, + 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, + 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, + 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, + 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, + 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, + 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 00000000..81dcf46c --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,140 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 00000000..c0d595da --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 00000000..65cb0f8e --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290 years). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 00000000..56974834 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-go.
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto +// DO NOT EDIT! + +/* +Package duration is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/duration/duration.proto + +It has these top-level messages: + Duration +*/ +package duration + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 189 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, + 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, + 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, + 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, + 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, + 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, + 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13, + 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, + 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 00000000..96c1796d --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,98 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 00000000..1b365762 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,125 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. 
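+// +// An illustrative round trip (an editorial sketch, not part of the upstream comment; TimestampProto is defined just below): +// +//   ts, err := TimestampProto(time.Now()) // time.Time -> google.protobuf.Timestamp +//   ... +//   t, err := Timestamp(ts)               // google.protobuf.Timestamp -> time.Time +// +// The Duration and DurationProto functions vendored earlier in this diff follow the same pattern for time.Duration values.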
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because it corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 00000000..ffcc5159 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,127 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +// DO NOT EDIT! + +/* +Package timestamp is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +It has these top-level messages: + Timestamp +*/ +package timestamp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 194 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, + 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, + 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, + 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, + 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, + 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, + 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, + 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, + 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, + 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 00000000..7992a858 --- /dev/null +++ 
b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,111 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. 
Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/google/btree/LICENSE similarity index 100% rename from vendor/github.com/go-openapi/analysis/LICENSE rename to vendor/github.com/google/btree/LICENSE diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md new file mode 100644 index 00000000..6062a4da --- /dev/null +++ b/vendor/github.com/google/btree/README.md @@ -0,0 +1,12 @@ +# BTree implementation for Go + +![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +The API is based off of the wonderful +http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to +act as a drop-in replacement for gollrb trees. + +See http://godoc.org/github.com/google/btree for documentation. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go new file mode 100644 index 00000000..fc5aaaa1 --- /dev/null +++ b/vendor/github.com/google/btree/btree.go @@ -0,0 +1,649 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values or backwards iteration. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are not safe for concurrent write access. +type FreeList struct { + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + index := len(f.freelist) - 1 + if index < 0 { + return new(node) + } + f.freelist, n = f.freelist[:index], f.freelist[index] + return +} + +func (f *FreeList) freeNode(n *node) { + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + } +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list.
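+// Two trees can share a free list, so that nodes freed by one may be reused by the other, for example (an illustrative editorial sketch, not part of the upstream comment): +// +//   f := NewFreeList(DefaultFreeListSize) +//   t1 := NewWithFreeList(2, f) +//   t2 := NewWithFreeList(2, f) // t1 and t2 now share freed nodes +// +// As the FreeList documentation above notes, two trees sharing a freelist are not safe for concurrent write access.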
+func NewWithFreeList(degree int, f *FreeList) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + freelist: f, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + (*s)[index] = nil + copy((*s)[index:], (*s)[index+1:]) + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + (*s)[index] = nil + copy((*s)[index:], (*s)[index+1:]) + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + t *BTree +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.t.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items = n.items[:i] + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children = n.children[:i+1] + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.children[i] + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item +// be found/replaced by insert, it will be returned.
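+// +// Illustrative walkthrough (an editorial addition, not part of the upstream comment): with maxItems == 3 (degree 2), inserting 1, 2 and 3 fills the root leaf [1 2 3]; ReplaceOrInsert then splits that root around its middle item before inserting 4, so insert descends from the new root [2] into the child [3], yielding children [1] and [3 4].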
+func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.children[i].insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + child := n.children[i] + if len(child.items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. 
+// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + child := n.children[i] + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + stealFrom := n.children[i-1] + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + stealFrom := n.children[i+1] + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + child = n.children[i] + } + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.t.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +// iterate provides a simple method for iterating over elements in the tree. +// It could probably use some work to be extra-efficient (it calls from() a +// little more than it should), but it works pretty well for now. +// +// It requires that 'from' and 'to' both return true for values we should hit +// with the iterator. It should also be the case that 'from' returns true for +// values less than or equal to values 'to' returns true for, and 'to' +// returns true for values greater than or equal to those that 'from' +// does. +func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool { + for i, item := range n.items { + if !from(item) { + continue + } + if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) { + return false + } + if !to(item) { + return false + } + if !iter(item) { + return false + } + } + if len(n.children) > 0 { + return n.children[len(n.children)-1].iterate(from, to, iter) + } + return true +} + +// Used for testing/debugging purposes. +func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + freelist *FreeList +} + +// maxItems returns the max number of items to allow per node. 
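+// For a tree created with New(2), for example, this is 2*2-1 = 3 items, matching the 1-3 items per node of the 2-3-4 tree described in New's documentation (this worked example is an editorial addition, not part of the upstream comment).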
+func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (t *BTree) newNode() (n *node) { + n = t.freelist.newNode() + n.t = t + return +} + +func (t *BTree) freeNode(n *node) { + for i := range n.items { + n.items[i] = nil // clear to allow GC + } + n.items = n.items[:0] + for i := range n.children { + n.children[i] = nil // clear to allow GC + } + n.children = n.children[:0] + n.t = nil // clear to allow GC + t.freelist.freeNode(n) +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return !a.Less(greaterOrEqual) }, + func(a Item) bool { return a.Less(lessThan) }, + iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return a.Less(pivot) }, + iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. 
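+// For example (an illustrative editorial addition, not part of the upstream comment; it uses the Int helper defined at the end of this file and a hypothetical tree tr): +// +//   tr.AscendGreaterOrEqual(Int(5), func(i Item) bool { +//       fmt.Println(i) // visits items >= 5 in ascending order +//       return true +//   })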
+func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return !a.Less(pivot) }, + func(a Item) bool { return true }, + iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return true }, + iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE similarity index 100% rename from vendor/github.com/go-openapi/loads/LICENSE rename to vendor/github.com/googleapis/gnostic/LICENSE diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go new file mode 100644 index 00000000..0e32451a --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -0,0 +1,8728 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +package openapi_v2 + +import ( + "fmt" + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v2" + "regexp" + "strings" +) + +// Version returns the package name (and OpenAPI version). +func Version() string { + return "openapi_v2" +} + +// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. 
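+// The decoded YAML value is either a map (compiled into a Schema) or a bare
+// boolean; whichever alternative matches is stored in the message's oneof
+// field.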
+func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := in.(bool) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. +func NewAny(in interface{}, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes, _ := yaml.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. +func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { + errors := make([]error, 0) + x := &ApiKeySecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [apiKey] + if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header query] + if ok && 
!compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. +func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { + errors := make([]error, 0) + x := &BasicAuthenticationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [basic] + if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok 
{ + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. +func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { + errors := make([]error, 0) + x := &BodyParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "schema"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "required", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [body] + if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema schema = 5; + v5 := compiler.MapValueForKey(m, "schema") + if v5 != nil { + var err error + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok 
:= compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if possible, returning an error if not. +func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefault creates an object of type Default if possible, returning an error if not. 
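+// Unlike the vendor-extension loops elsewhere in this file, every key/value
+// pair of the input map is kept, not just "x-"-prefixed keys: each value is
+// offered to compiler.HandleExtension first and falls back to NewAny.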
+func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { + errors := make([]error, 0) + x := &Default{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefinitions creates an object of type Definitions if possible, returning an error if not. +func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { + errors := make([]error, 0) + x := &Definitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
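+// A Document is the root of an OpenAPI v2 (Swagger) description: the
+// "swagger", "info", and "paths" keys are required, and the "swagger"
+// version string must be exactly "2.0".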
+func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "paths", "swagger"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string swagger = 1; + v1 := compiler.MapValueForKey(m, "swagger") + if v1 != nil { + x.Swagger, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [2.0] + if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) + if err != nil { + errors = append(errors, err) + } + } + // string host = 3; + v3 := compiler.MapValueForKey(m, "host") + if v3 != nil { + x.Host, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string base_path = 4; + v4 := compiler.MapValueForKey(m, "basePath") + if v4 != nil { + x.BasePath, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string schemes = 5; + v5 := compiler.MapValueForKey(m, "schemes") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 6; + v6 := compiler.MapValueForKey(m, "consumes") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
repeated string produces = 7; + v7 := compiler.MapValueForKey(m, "produces") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Paths paths = 8; + v8 := compiler.MapValueForKey(m, "paths") + if v8 != nil { + var err error + x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) + if err != nil { + errors = append(errors, err) + } + } + // Definitions definitions = 9; + v9 := compiler.MapValueForKey(m, "definitions") + if v9 != nil { + var err error + x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // ParameterDefinitions parameters = 10; + v10 := compiler.MapValueForKey(m, "parameters") + if v10 != nil { + var err error + x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponseDefinitions responses = 11; + v11 := compiler.MapValueForKey(m, "responses") + if v11 != nil { + var err error + x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // SecurityDefinitions security_definitions = 13; + v13 := compiler.MapValueForKey(m, "securityDefinitions") + if v13 != nil { + var err error + x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Tag tags = 14; + v14 := compiler.MapValueForKey(m, "tags") + if v14 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := v14.([]interface{}) + if ok { + for _, item := range a { + y, err := NewTag(item, compiler.NewContext("tags", context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 15; + v15 := compiler.MapValueForKey(m, "externalDocs") + if v15 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 16; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + 
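+ // Report every validation problem found above as a single error group
+ // instead of failing on the first one.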
return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExamples creates an object of type Examples if possible, returning an error if not. +func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { + errors := make([]error, 0) + x := &Examples{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. +func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, 
compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. +func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { + errors := make([]error, 0) + x := &FileSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string format = 1; + v1 := compiler.MapValueForKey(m, "format") + if v1 != nil { + x.Format, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 2; + v2 := compiler.MapValueForKey(m, "title") + if v2 != nil { + x.Title, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 4; + v4 := compiler.MapValueForKey(m, "default") + if v4 != nil { + var err error + x.Default, err = NewAny(v4, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string required = 5; + v5 := compiler.MapValueForKey(m, "required") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [file] + if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 7; + v7 := compiler.MapValueForKey(m, "readOnly") + if v7 != nil { + x.ReadOnly, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has 
unexpected value for readOnly: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. +func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { + errors := make([]error, 0) + x := &FormDataParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [formData] + if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has 
unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array file] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = 
append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + 
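+ // yaml.v2 decodes small YAML integers as int, so widen to float64 here
+ // just like the sized numeric cases above.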
x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeader creates an object of type Header if possible, returning an error if not. +func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 
!= nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = 
int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 18; + v18 := compiler.MapValueForKey(m, "description") + if v18 != nil { + x.Description, ok = v18.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 19; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. 
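+// Header parameters share the primitive validation keywords of the other
+// non-body parameters (type, format, maximum, pattern, and so on), but
+// "in" must be "header" and collectionFormat is limited to csv, ssv, tsv,
+// and pipes; "multi" is only meaningful for query and formData parameters.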
+func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) { + errors := make([]error, 0) + x := &HeaderParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header] + if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, 
"collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + 
if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaders creates an object of type Headers if possible, returning an error if not. 
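Before decoding any fields, each reader rejects map keys that are neither in its sorted `allowedKeys` list nor matched by an allowed pattern; `pattern0` in the generated code evidently matches the `x-` vendor-extension prefix. A hypothetical re-implementation of that check for a plain Go map (the vendored `compiler.InvalidKeysInMap` operates on a YAML map structure instead):

```go
package main

import (
	"fmt"
	"regexp"
	"sort"
)

// pattern0 is assumed to be the vendor-extension prefix pattern.
var pattern0 = regexp.MustCompile("^x-")

// invalidKeys returns the keys of m that are neither explicitly allowed
// nor matched by any allowed pattern.
func invalidKeys(m map[string]interface{}, allowed []string, patterns []*regexp.Regexp) []string {
	bad := []string{}
	for k := range m {
		ok := false
		for _, a := range allowed {
			if a == k {
				ok = true
				break
			}
		}
		for _, p := range patterns {
			if p.MatchString(k) {
				ok = true
				break
			}
		}
		if !ok {
			bad = append(bad, k)
		}
	}
	sort.Strings(bad)
	return bad
}

func main() {
	m := map[string]interface{}{"name": "id", "x-note": 1, "bogus": 2}
	fmt.Println(invalidKeys(m, []string{"in", "name"}, []*regexp.Regexp{pattern0}))
	// prints [bogus]
}
```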
+func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { + errors := make([]error, 0) + x := &Headers{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeader additional_properties = 1; + // MAP: Header + x.AdditionalProperties = make([]*NamedHeader, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedHeader{} + pair.Name = k + var err error + pair.Value, err = NewHeader(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewInfo creates an object of type Info if possible, returning an error if not. +func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string version = 2; + v2 := compiler.MapValueForKey(m, "version") + if v2 != nil { + x.Version, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string terms_of_service = 4; + v4 := compiler.MapValueForKey(m, "termsOfService") + if v4 != nil { + x.TermsOfService, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Contact contact = 5; + v5 := compiler.MapValueForKey(m, "contact") + if v5 != nil { + var err error + x.Contact, err = NewContact(v5, compiler.NewContext("contact", context)) + if err != nil { + errors = append(errors, err) + } + } + // License license = 6; + v6 := compiler.MapValueForKey(m, "license") + if v6 != nil { + var err error + x.License, err = NewLicense(v6, 
compiler.NewContext("license", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. +func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) { + errors := make([]error, 0) + x := &ItemsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Schema = make([]*Schema, 0) + y, err := NewSchema(m, compiler.NewContext("", context)) + if err != nil { + return nil, err + } + x.Schema = append(x.Schema, y) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. +func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) { + errors := make([]error, 0) + x := &JsonReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"$ref", "description"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLicense creates an object of type License if possible, returning an error if not. 
+func NewLicense(in interface{}, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. 
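The loop that closes `NewLicense` (and most other readers here) harvests vendor extensions: every key with the `x-` prefix is re-serialized to YAML and kept as a name/value pair, and the `yaml.Marshal` error is deliberately discarded. A simplified, hypothetical version over a plain map; the `gopkg.in/yaml.v2` import path is an assumption made to match the document's `yaml.Marshal` calls:

```go
package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

// namedYaml stands in for the NamedAny/Any pair built by the readers.
type namedYaml struct {
	Name string
	Yaml string
}

// collectExtensions keeps every "x-" key alongside its value re-marshaled
// as YAML text.
func collectExtensions(m map[string]interface{}) []namedYaml {
	out := []namedYaml{}
	for k, v := range m {
		if !strings.HasPrefix(k, "x-") {
			continue
		}
		b, _ := yaml.Marshal(v) // the generated code also ignores this error
		out = append(out, namedYaml{Name: k, Yaml: string(b)})
	}
	return out
}

func main() {
	m := map[string]interface{}{"name": "MIT", "x-origin": "spdx"}
	fmt.Println(collectExtensions(m))
}
```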
+func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { + errors := make([]error, 0) + x := &NamedAny{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewAny(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. +func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) { + errors := make([]error, 0) + x := &NamedHeader{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Header value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeader(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
+func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) { + errors := make([]error, 0) + x := &NamedParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Parameter value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameter(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. +func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
+func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) { + errors := make([]error, 0) + x := &NamedResponse{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Response value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponse(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. +func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) { + errors := make([]error, 0) + x := &NamedResponseValue{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseValue value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
+func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { + errors := make([]error, 0) + x := &NamedSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchema(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. +func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &NamedSecurityDefinitionsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecurityDefinitionsItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
+func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. +func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
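The long run of `NewNamed*` constructors above all instantiate one pattern: a map entry becomes a `{Name, Value}` pair appended to a repeated field, plausibly so that the original key order can be preserved (which neither Go maps nor proto map fields guarantee). A generic sketch of the shape, using hypothetical standalone types:

```go
package main

import "fmt"

// NamedString mirrors the generated pair type: a key plus its value.
type NamedString struct {
	Name  string
	Value string
}

// toNamedStrings lowers a map into the repeated-pair representation.
// (A real ordered source would iterate entries in document order.)
func toNamedStrings(m map[string]string) []*NamedString {
	pairs := make([]*NamedString, 0, len(m))
	for k, v := range m {
		pairs = append(pairs, &NamedString{Name: k, Value: v})
	}
	return pairs
}

func main() {
	pairs := toNamedStrings(map[string]string{"read": "grants read access"})
	fmt.Println(pairs[0].Name, "=>", pairs[0].Value)
}
```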
+func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { + errors := make([]error, 0) + x := &NonBodyParameter{} + matched := false + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // HeaderParameterSubSchema header_parameter_sub_schema = 1; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // QueryParameterSubSchema query_parameter_sub_schema = 3; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // PathParameterSubSchema path_parameter_sub_schema = 4; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
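`NewNonBodyParameter` shows the oneof-matching strategy used throughout: every candidate sub-reader is tried against the same map, a failure only means "wrong subtype", and once anything matched the accumulated errors are thrown away. Note that the generated code tries *all* candidates, so if several match, the last assignment to `x.Oneof` wins. A hypothetical first-match variant, with `errors.Join` (Go 1.20+) standing in for `compiler.NewErrorGroupOrNil`:

```go
package main

import (
	"errors"
	"fmt"
)

// reader is one candidate decoder for the oneof.
type reader func(in map[string]interface{}) (interface{}, error)

// matchOneof tries each candidate in turn and keeps the first success;
// if nothing matches, all the "wrong subtype" errors are reported together.
func matchOneof(in map[string]interface{}, candidates []reader) (interface{}, error) {
	var errs []error
	for _, r := range candidates {
		v, err := r(in)
		if err == nil {
			return v, nil
		}
		errs = append(errs, err)
	}
	return nil, errors.Join(errs...)
}

func main() {
	header := func(in map[string]interface{}) (interface{}, error) {
		if in["in"] == "header" {
			return "header parameter", nil
		}
		return nil, fmt.Errorf("in is %q, not header", in["in"])
	}
	query := func(in map[string]interface{}) (interface{}, error) {
		if in["in"] == "query" {
			return "query parameter", nil
		}
		return nil, fmt.Errorf("in is %q, not query", in["in"])
	}
	v, err := matchOneof(map[string]interface{}{"in": "query"}, []reader{header, query})
	fmt.Println(v, err)
}
```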
+func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { + errors := make([]error, 0) + x := &Oauth2AccessCodeSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [accessCode] + if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 5; + v5 := compiler.MapValueForKey(m, "tokenUrl") + if v5 != nil { + x.TokenUrl, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 6; + v6 := compiler.MapValueForKey(m, "description") + if v6 != nil { + x.Description, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := 
compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. +func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ApplicationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [application] + if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string 
description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. +func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ImplicitSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [implicit] + if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = 
NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. +func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { + errors := make([]error, 0) + x := &Oauth2PasswordSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [password] + if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. +func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { + errors := make([]error, 0) + x := &Oauth2Scopes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedString{} + pair.Name = k + pair.Value = v.(string) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. 
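One sharp edge in `NewOauth2Scopes` above: the scope value is stored with an unchecked assertion (`pair.Value = v.(string)`), so a non-string scope description, say a nested map, would panic at runtime instead of flowing into the `errors` slice like every other type mismatch in this file. A hypothetical defensive rewrite using the two-value assertion form:

```go
package main

import "fmt"

// scopeStrings converts a scopes map, reporting (rather than panicking on)
// values that are not strings.
func scopeStrings(m map[string]interface{}) (map[string]string, []error) {
	out := map[string]string{}
	var errs []error
	for k, v := range m {
		s, ok := v.(string) // checked: ok is false for non-strings
		if !ok {
			errs = append(errs, fmt.Errorf("scope %s has unexpected value: %+v (%T)", k, v, v))
			continue
		}
		out[k] = s
	}
	return out, errs
}

func main() {
	scopes, errs := scopeStrings(map[string]interface{}{"read": "read access", "write": 7})
	fmt.Println(scopes, errs)
}
```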
+func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := v1.([]interface{}) + if ok { + x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 6; + v6 := compiler.MapValueForKey(m, "produces") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 7; + v7 := compiler.MapValueForKey(m, "consumes") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParametersItem parameters = 8; + v8 := compiler.MapValueForKey(m, "parameters") + if v8 != nil { + // repeated ParametersItem + x.Parameters = 
make([]*ParametersItem, 0) + a, ok := v8.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // Responses responses = 9; + v9 := compiler.MapValueForKey(m, "responses") + if v9 != nil { + var err error + x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string schemes = 10; + v10 := compiler.MapValueForKey(m, "schemes") + if v10 != nil { + v, ok := v10.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 11; + v11 := compiler.MapValueForKey(m, "deprecated") + if v11 != nil { + x.Deprecated, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated NamedAny vendor_extension = 13; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. 
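`NewOperation` also illustrates the two enum checks used in this file: scalar fields are validated with a contains-value test (e.g. `flow` against the oauth2 flow names) and string arrays with a contains-values test (e.g. `schemes` against http/https/ws/wss). A hypothetical standalone version of both helpers:

```go
package main

import "fmt"

// containsValue reports whether v is one of the allowed enum values.
func containsValue(allowed []string, v string) bool {
	for _, a := range allowed {
		if a == v {
			return true
		}
	}
	return false
}

// containsValues reports whether every element of vs is allowed.
func containsValues(allowed []string, vs []string) bool {
	for _, v := range vs {
		if !containsValue(allowed, v) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(containsValue([]string{"header"}, "query")) // false
	schemes := []string{"http", "ftp"}
	fmt.Println(containsValues([]string{"http", "https", "ws", "wss"}, schemes)) // false: ftp
}
```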
+func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + matched := false + // BodyParameter body_parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_BodyParameter{BodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // NonBodyParameter non_body_parameter = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. +func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) { + errors := make([]error, 0) + x := &ParameterDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameter additional_properties = 1; + // MAP: Parameter + x.AdditionalProperties = make([]*NamedParameter, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedParameter{} + pair.Name = k + var err error + pair.Value, err = NewParameter(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
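Throughout these readers, nested calls pass `compiler.NewContext(k, context)`, threading the current key into a parent context so that errors can name the path to the offending node. The real `compiler.Context` is richer than this, but a plausible minimal model of the chaining looks like:

```go
package main

import "fmt"

// Context is a guess at the minimal shape: a name plus a parent link.
type Context struct {
	Name   string
	Parent *Context
}

func NewContext(name string, parent *Context) *Context {
	return &Context{Name: name, Parent: parent}
}

// Path walks the parent chain to build a slash-separated location.
func (c *Context) Path() string {
	if c.Parent == nil {
		return c.Name
	}
	return c.Parent.Path() + "/" + c.Name
}

func main() {
	root := NewContext("$root", nil)
	param := NewContext("limitParam", NewContext("parameters", root))
	fmt.Println(param.Path()) // $root/parameters/limitParam
}
```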
+func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { + errors := make([]error, 0) + x := &ParametersItem{} + matched := false + // Parameter parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewParameter(m, compiler.NewContext("parameter", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_Parameter{Parameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathItem creates an object of type PathItem if possible, returning an error if not. +func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { + errors := make([]error, 0) + x := &PathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Operation get = 2; + v2 := compiler.MapValueForKey(m, "get") + if v2 != nil { + var err error + x.Get, err = NewOperation(v2, compiler.NewContext("get", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation put = 3; + v3 := compiler.MapValueForKey(m, "put") + if v3 != nil { + var err error + x.Put, err = NewOperation(v3, compiler.NewContext("put", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation post = 4; + v4 := compiler.MapValueForKey(m, "post") + if v4 != nil { + var err error + x.Post, err = NewOperation(v4, compiler.NewContext("post", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation delete = 5; + v5 := compiler.MapValueForKey(m, "delete") + if v5 != nil { + var err error + x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation options = 6; + v6 := compiler.MapValueForKey(m, "options") + if v6 != nil { + var err error + x.Options, err = NewOperation(v6, compiler.NewContext("options", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation head = 7; + v7 := compiler.MapValueForKey(m, "head") + if v7 != nil { + var err error 
+ x.Head, err = NewOperation(v7, compiler.NewContext("head", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation patch = 8; + v8 := compiler.MapValueForKey(m, "patch") + if v8 != nil { + var err error + x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated ParametersItem parameters = 9; + v9 := compiler.MapValueForKey(m, "parameters") + if v9 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := v9.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. +func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { + errors := make([]error, 0) + x := &PathParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"required"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [path] + if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = 
v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = 
float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPaths creates an object of type Paths if possible, returning an error if not. +func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { + errors := make([]error, 0) + x := &Paths{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern0, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedAny vendor_extension = 1; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + // repeated NamedPathItem path = 2; + // MAP: PathItem ^/ + x.Path = make([]*NamedPathItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "/") { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
+func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { + errors := make([]error, 0) + x := &PrimitivesItems{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + 
if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + 
x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 18; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewProperties creates an object of type Properties if possible, returning an error if not. +func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) + x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
+func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { + errors := make([]error, 0) + x := &QueryParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [query] + if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + 
} + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; 
+ v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponse creates an object of type Response if possible, returning an error if not. 
+func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { + errors := make([]error, 0) + x := &Response{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"description"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "examples", "headers", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaItem schema = 2; + v2 := compiler.MapValueForKey(m, "schema") + if v2 != nil { + var err error + x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // Headers headers = 3; + v3 := compiler.MapValueForKey(m, "headers") + if v3 != nil { + var err error + x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) + if err != nil { + errors = append(errors, err) + } + } + // Examples examples = 4; + v4 := compiler.MapValueForKey(m, "examples") + if v4 != nil { + var err error + x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
+func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) { + errors := make([]error, 0) + x := &ResponseDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedResponse additional_properties = 1; + // MAP: Response + x.AdditionalProperties = make([]*NamedResponse, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedResponse{} + pair.Name = k + var err error + pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. +func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) { + errors := make([]error, 0) + x := &ResponseValue{} + matched := false + // Response response = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewResponse(m, compiler.NewContext("response", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_Response{Response: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponses creates an object of type Responses if possible, returning an error if not. 
+func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { + errors := make([]error, 0) + x := &Responses{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern2, pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedResponseValue response_code = 1; + // MAP: ResponseValue ^([0-9]{3})$|^(default)$ + x.ResponseCode = make([]*NamedResponseValue, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if pattern2.MatchString(k) { + pair := &NamedResponseValue{} + pair.Name = k + var err error + pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.ResponseCode = append(x.ResponseCode, pair) + } + } + } + // repeated NamedAny vendor_extension = 2; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchema creates an object of type Schema if possible, returning an error if not. 
+func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { + errors := make([]error, 0) + x := &Schema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 3; + v3 := compiler.MapValueForKey(m, "title") + if v3 != nil { + x.Title, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float multiple_of = 6; + v6 := compiler.MapValueForKey(m, "multipleOf") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.MultipleOf = v6 + case float32: + x.MultipleOf = float64(v6) + case uint64: + x.MultipleOf = float64(v6) + case uint32: + x.MultipleOf = float64(v6) + case int64: + x.MultipleOf = float64(v6) + case int32: + x.MultipleOf = float64(v6) + case int: + x.MultipleOf = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float maximum = 7; + v7 := compiler.MapValueForKey(m, "maximum") + if v7 != nil { + switch v7 := v7.(type) { + case float64: + x.Maximum = v7 + case float32: + x.Maximum = float64(v7) + case uint64: + x.Maximum = float64(v7) + case uint32: + x.Maximum = float64(v7) + case int64: + x.Maximum = float64(v7) + case int32: + x.Maximum = float64(v7) + case int: + x.Maximum = float64(v7) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool 
exclusive_maximum = 8; + v8 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v8 != nil { + x.ExclusiveMaximum, ok = v8.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 9; + v9 := compiler.MapValueForKey(m, "minimum") + if v9 != nil { + switch v9 := v9.(type) { + case float64: + x.Minimum = v9 + case float32: + x.Minimum = float64(v9) + case uint64: + x.Minimum = float64(v9) + case uint32: + x.Minimum = float64(v9) + case int64: + x.Minimum = float64(v9) + case int32: + x.Minimum = float64(v9) + case int: + x.Minimum = float64(v9) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 10; + v10 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v10 != nil { + x.ExclusiveMinimum, ok = v10.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 11; + v11 := compiler.MapValueForKey(m, "maxLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 12; + v12 := compiler.MapValueForKey(m, "minLength") + if v12 != nil { + t, ok := v12.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 13; + v13 := compiler.MapValueForKey(m, "pattern") + if v13 != nil { + x.Pattern, ok = v13.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 14; + v14 := compiler.MapValueForKey(m, "maxItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 15; + v15 := compiler.MapValueForKey(m, "minItems") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 16; + v16 := compiler.MapValueForKey(m, "uniqueItems") + if v16 != nil { + x.UniqueItems, ok = v16.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_properties = 17; + v17 := compiler.MapValueForKey(m, "maxProperties") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_properties = 18; + v18 := compiler.MapValueForKey(m, "minProperties") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinProperties = int64(t) + } else { + message := fmt.Sprintf("has 
unexpected value for minProperties: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string required = 19; + v19 := compiler.MapValueForKey(m, "required") + if v19 != nil { + v, ok := v19.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // AdditionalPropertiesItem additional_properties = 21; + v21 := compiler.MapValueForKey(m, "additionalProperties") + if v21 != nil { + var err error + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) + if err != nil { + errors = append(errors, err) + } + } + // TypeItem type = 22; + v22 := compiler.MapValueForKey(m, "type") + if v22 != nil { + var err error + x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) + if err != nil { + errors = append(errors, err) + } + } + // ItemsItem items = 23; + v23 := compiler.MapValueForKey(m, "items") + if v23 != nil { + var err error + x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Schema all_of = 24; + v24 := compiler.MapValueForKey(m, "allOf") + if v24 != nil { + // repeated Schema + x.AllOf = make([]*Schema, 0) + a, ok := v24.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSchema(item, compiler.NewContext("allOf", context)) + if err != nil { + errors = append(errors, err) + } + x.AllOf = append(x.AllOf, y) + } + } + } + // Properties properties = 25; + v25 := compiler.MapValueForKey(m, "properties") + if v25 != nil { + var err error + x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) + if err != nil { + errors = append(errors, err) + } + } + // string discriminator = 26; + v26 := compiler.MapValueForKey(m, "discriminator") + if v26 != nil { + x.Discriminator, ok = v26.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 27; + v27 := compiler.MapValueForKey(m, "readOnly") + if v27 != nil { + x.ReadOnly, ok = v27.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Xml xml = 28; + v28 := compiler.MapValueForKey(m, "xml") + if v28 != nil { + var err error + x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) + if err != nil { + errors = append(errors, err) + } + } + // ExternalDocs external_docs = 29; + v29 := compiler.MapValueForKey(m, "externalDocs") + if v29 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 30; + v30 := compiler.MapValueForKey(m, "example") + if v30 != nil { + var err error + x.Example, err = NewAny(v30, compiler.NewContext("example", context)) + 
if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 31; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. +func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { + errors := make([]error, 0) + x := &SchemaItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // FileSchema file_schema = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_FileSchema{FileSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. +func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { + errors := make([]error, 0) + x := &SecurityDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSecurityDefinitionsItem additional_properties = 1; + // MAP: SecurityDefinitionsItem + x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSecurityDefinitionsItem{} + pair.Name = k + var err error + pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
+func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &SecurityDefinitionsItem{} + matched := false + // BasicAuthenticationSecurity basic_authentication_security = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // ApiKeySecurity api_key_security = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ImplicitSecurity oauth2_implicit_security = 3; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2PasswordSecurity oauth2_password_security = 4; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ApplicationSecurity oauth2_application_security = 5; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
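+// Each key of the input map names a security scheme and each value must
+// convert to a StringArray (in Swagger 2.0, typically the list of OAuth2
+// scopes required); the pairs are stored in input order as NamedStringArray values.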
+func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) {
+	errors := make([]error, 0)
+	x := &SecurityRequirement{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedStringArray additional_properties = 1;
+		// MAP: StringArray
+		x.AdditionalProperties = make([]*NamedStringArray, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedStringArray{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewStringArray(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewStringArray creates an object of type StringArray if possible, returning an error if not.
+func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) {
+	errors := make([]error, 0)
+	x := &StringArray{}
+	a, ok := in.([]interface{})
+	if !ok {
+		message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		x.Value = make([]string, 0)
+		for _, s := range a {
+			// check the element type instead of asserting it, so a
+			// non-string element yields an error rather than a panic
+			value, ok := s.(string)
+			if ok {
+				x.Value = append(x.Value, value)
+			} else {
+				message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", s, s)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTag creates an object of type Tag if possible, returning an error if not.
+func NewTag(in interface{}, context *compiler.Context) (*Tag, error) {
+	errors := make([]error, 0)
+	x := &Tag{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"name"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "externalDocs", "name"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 2;
+		v2 := compiler.MapValueForKey(m, "description")
+		if v2 != nil {
+			x.Description, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ExternalDocs external_docs = 3;
+		v3 := compiler.MapValueForKey(m, "externalDocs")
+		if v3 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 4;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTypeItem creates an object of type TypeItem if possible, returning an error if not.
+func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) {
+	errors := make([]error, 0)
+	x := &TypeItem{}
+	switch in := in.(type) {
+	case string:
+		x.Value = make([]string, 0)
+		x.Value = append(x.Value, in)
+	case []interface{}:
+		x.Value = make([]string, 0)
+		for _, v := range in {
+			value, ok := v.(string)
+			if ok {
+				x.Value = append(x.Value, value)
+			} else {
+				// report the offending value; after a failed assertion
+				// "value" would only hold the zero string
+				message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", v, v)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	default:
+		message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not.
+func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) {
+	errors := make([]error, 0)
+	x := &VendorExtension{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedAny additional_properties = 1;
+		// MAP: Any
+		x.AdditionalProperties = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedAny{}
+				pair.Name = k
+				result := &Any{}
+				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				if handled {
+					if err != nil {
+						errors = append(errors, err)
+					} else {
+						bytes, _ := yaml.Marshal(v)
+						result.Yaml = string(bytes)
+						result.Value = resultFromExt
+						pair.Value = result
+					}
+				} else {
+					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewXml creates an object of type Xml if possible, returning an error if not.
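+// Only the keys "attribute", "name", "namespace", "prefix", "wrapped", and
+// "x-" vendor extensions are accepted; any other key is reported as invalid.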
+func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { + errors := make([]error, 0) + x := &Xml{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string namespace = 2; + v2 := compiler.MapValueForKey(m, "namespace") + if v2 != nil { + x.Namespace, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string prefix = 3; + v3 := compiler.MapValueForKey(m, "prefix") + if v3 != nil { + x.Prefix, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool attribute = 4; + v4 := compiler.MapValueForKey(m, "attribute") + if v4 != nil { + x.Attribute, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool wrapped = 5; + v5 := compiler.MapValueForKey(m, "wrapped") + if v5 != nil { + x.Wrapped, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. +func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Any objects. 
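+// Any values are treated as opaque YAML, so there is nothing to resolve;
+// the method exists to keep the ResolveReferences interface uniform across
+// the generated types.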
+func (m *Any) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ApiKeySecurity objects. +func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. +func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BodyParameter objects. +func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Contact objects. +func (m *Contact) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Default objects. +func (m *Default) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Definitions objects. +func (m *Definitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Document objects. 
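+// Resolution walks every reference-bearing field of the document (info,
+// paths, definitions, parameters, responses, security, securityDefinitions,
+// tags, externalDocs, and vendor extensions) and accumulates any failures
+// into a single error group.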
+func (m *Document) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Info != nil { + _, err := m.Info.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Paths != nil { + _, err := m.Paths.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Definitions != nil { + _, err := m.Definitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.SecurityDefinitions != nil { + _, err := m.SecurityDefinitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Tags { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Examples objects. +func (m *Examples) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExternalDocs objects. +func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FileSchema objects. +func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Header objects. +func (m *Header) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. +func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Headers objects. +func (m *Headers) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Info objects. +func (m *Info) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Contact != nil { + _, err := m.Contact.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.License != nil { + _, err := m.License.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ItemsItem objects. 
+func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.Schema { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside JsonReference objects. +func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewJsonReference(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside License objects. +func (m *License) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedAny objects. +func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedHeader objects. +func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedParameter objects. +func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedPathItem objects. +func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponse objects. +func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponseValue objects. +func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSchema objects. 
+func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. +func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedString objects. +func (m *NamedString) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedStringArray objects. +func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NonBodyParameter objects. +func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) + if ok { + _, err := p.HeaderParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) + if ok { + _, err := p.FormDataParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) + if ok { + _, err := p.QueryParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) + if ok { + _, err := p.PathParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. +func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. +func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. +func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2Scopes objects. +func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Operation objects. +func (m *Operation) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Parameter objects. +func (m *Parameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*Parameter_BodyParameter) + if ok { + _, err := p.BodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*Parameter_NonBodyParameter) + if ok { + _, err := p.NonBodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParameterDefinitions objects. +func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParametersItem objects. 
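+// If the item is a JSON reference, the referenced node is read and the item
+// is replaced in place with a ParametersItem built from it; a direct
+// Parameter value is instead resolved recursively.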
+func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ParametersItem_Parameter) + if ok { + _, err := p.Parameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ParametersItem_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewParametersItem(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathItem objects. +func (m *PathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewPathItem(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Get != nil { + _, err := m.Get.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Put != nil { + _, err := m.Put.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Post != nil { + _, err := m.Post.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Delete != nil { + _, err := m.Delete.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Options != nil { + _, err := m.Options.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Head != nil { + _, err := m.Head.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Patch != nil { + _, err := m.Patch.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathParameterSubSchema objects. +func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Paths objects. 
+func (m *Paths) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PrimitivesItems objects. +func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Properties objects. +func (m *Properties) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside QueryParameterSubSchema objects. +func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Response objects. +func (m *Response) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseDefinitions objects. 
+func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseValue objects. +func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ResponseValue_Response) + if ok { + _, err := p.Response.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ResponseValue_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewResponseValue(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Responses objects. +func (m *Responses) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.ResponseCode { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Schema objects. +func (m *Schema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewSchema(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.AdditionalProperties != nil { + _, err := m.AdditionalProperties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Type != nil { + _, err := m.Type.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.AllOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Properties != nil { + _, err := m.Properties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Xml != nil { + _, err := m.Xml.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = 
append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SchemaItem objects. +func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SchemaItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SchemaItem_FileSchema) + if ok { + _, err := p.FileSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitions objects. +func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitionsItem objects. +func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) + if ok { + _, err := p.BasicAuthenticationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity) + if ok { + _, err := p.ApiKeySecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity) + if ok { + _, err := p.Oauth2ImplicitSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity) + if ok { + _, err := p.Oauth2PasswordSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity) + if ok { + _, err := p.Oauth2ApplicationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) + if ok { + _, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityRequirement objects. +func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside StringArray objects. +func (m *StringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Tag objects. 
+func (m *Tag) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside TypeItem objects. +func (m *TypeItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside VendorExtension objects. +func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Xml objects. +func (m *Xml) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. +func (m *AdditionalPropertiesItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // AdditionalPropertiesItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return v1.Boolean + } + return nil +} + +// ToRawInfo returns a description of Any suitable for JSON or YAML export. +func (m *Any) ToRawInfo() interface{} { + var err error + var info1 []yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info1) + if err == nil { + return info1 + } + var info2 yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info2) + if err == nil { + return info2 + } + var info3 interface{} + err = yaml.Unmarshal([]byte(m.Yaml), &info3) + if err == nil { + return info3 + } + return nil +} + +// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. +func (m *ApiKeySecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. 
+func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. +func (m *BodyParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.Schema != nil { + info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + } + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Contact suitable for JSON or YAML export. +func (m *Contact) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{"url", m.Url}) + } + if m.Email != "" { + info = append(info, yaml.MapItem{"email", m.Email}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Default suitable for JSON or YAML export. +func (m *Default) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. +func (m *Definitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Document suitable for JSON or YAML export. 
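+// Fields are appended to a yaml.MapSlice in schema order, and fields still
+// at their zero value are skipped, so exported documents keep a stable key
+// ordering.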
+func (m *Document) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Swagger != "" { + info = append(info, yaml.MapItem{"swagger", m.Swagger}) + } + if m.Info != nil { + info = append(info, yaml.MapItem{"info", m.Info.ToRawInfo()}) + } + // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Host != "" { + info = append(info, yaml.MapItem{"host", m.Host}) + } + if m.BasePath != "" { + info = append(info, yaml.MapItem{"basePath", m.BasePath}) + } + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{"schemes", m.Schemes}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{"consumes", m.Consumes}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{"produces", m.Produces}) + } + if m.Paths != nil { + info = append(info, yaml.MapItem{"paths", m.Paths.ToRawInfo()}) + } + // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Definitions != nil { + info = append(info, yaml.MapItem{"definitions", m.Definitions.ToRawInfo()}) + } + // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Parameters != nil { + info = append(info, yaml.MapItem{"parameters", m.Parameters.ToRawInfo()}) + } + // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Responses != nil { + info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + } + // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"security", items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.SecurityDefinitions != nil { + info = append(info, yaml.MapItem{"securityDefinitions", m.SecurityDefinitions.ToRawInfo()}) + } + // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Tags) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Tags { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"tags", items}) + } + // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Examples suitable for JSON or YAML export. 
+func (m *Examples) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. +func (m *ExternalDocs) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{"url", m.Url}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. +func (m *FileSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Title != "" { + info = append(info, yaml.MapItem{"title", m.Title}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Required) != 0 { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.ReadOnly != false { + info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Example != nil { + info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + } + // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
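+// Note that numeric and boolean fields are compared against their Go zero
+// values before being emitted, so an explicit "minimum: 0" or
+// "uniqueItems: false" in the source document is dropped from the raw output.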
+func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Header suitable for JSON or YAML export. 
+func (m *Header) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
+func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Headers suitable for JSON or YAML export. +func (m *Headers) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Info suitable for JSON or YAML export. 
+func (m *Info) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Title != "" { + info = append(info, yaml.MapItem{"title", m.Title}) + } + if m.Version != "" { + info = append(info, yaml.MapItem{"version", m.Version}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.TermsOfService != "" { + info = append(info, yaml.MapItem{"termsOfService", m.TermsOfService}) + } + if m.Contact != nil { + info = append(info, yaml.MapItem{"contact", m.Contact.ToRawInfo()}) + } + // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.License != nil { + info = append(info, yaml.MapItem{"license", m.License.ToRawInfo()}) + } + // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. +func (m *ItemsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if len(m.Schema) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Schema { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"schema", items}) + } + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. +func (m *JsonReference) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.XRef != "" { + info = append(info, yaml.MapItem{"$ref", m.XRef}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + return info +} + +// ToRawInfo returns a description of License suitable for JSON or YAML export. +func (m *License) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{"url", m.Url}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. +func (m *NamedAny) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. +func (m *NamedHeader) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. 
+func (m *NamedParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. +func (m *NamedPathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. +func (m *NamedResponse) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. +func (m *NamedResponseValue) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. +func (m *NamedSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. +func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. +func (m *NamedString) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Value != "" { + info = append(info, yaml.MapItem{"value", m.Value}) + } + return info +} + +// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. +func (m *NamedStringArray) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. 
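+// NonBodyParameter is a "oneof" wrapper, so instead of building a MapSlice
+// this method delegates to whichever variant is set (the header, formData,
+// query, or path parameter sub-schema) and returns nil when none is set.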
+func (m *NonBodyParameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // NonBodyParameter + // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetHeaderParameterSubSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFormDataParameterSubSchema() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetQueryParameterSubSchema() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetPathParameterSubSchema() + if v3 != nil { + return v3.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. +func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.AuthorizationUrl != "" { + info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + } + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. +func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. 
+func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.AuthorizationUrl != "" { + info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. +func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. +func (m *Oauth2Scopes) ToRawInfo() interface{} { + info := yaml.MapSlice{} + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Operation suitable for JSON or YAML export. 
+func (m *Operation) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if len(m.Tags) != 0 { + info = append(info, yaml.MapItem{"tags", m.Tags}) + } + if m.Summary != "" { + info = append(info, yaml.MapItem{"summary", m.Summary}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.OperationId != "" { + info = append(info, yaml.MapItem{"operationId", m.OperationId}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{"produces", m.Produces}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{"consumes", m.Consumes}) + } + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"parameters", items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + if m.Responses != nil { + info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + } + // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{"schemes", m.Schemes}) + } + if m.Deprecated != false { + info = append(info, yaml.MapItem{"deprecated", m.Deprecated}) + } + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"security", items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. +func (m *Parameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // Parameter + // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBodyParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetNonBodyParameter() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. +func (m *ParameterDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. 
+func (m *ParametersItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ParametersItem + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. +func (m *PathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.XRef != "" { + info = append(info, yaml.MapItem{"$ref", m.XRef}) + } + if m.Get != nil { + info = append(info, yaml.MapItem{"get", m.Get.ToRawInfo()}) + } + // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Put != nil { + info = append(info, yaml.MapItem{"put", m.Put.ToRawInfo()}) + } + // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Post != nil { + info = append(info, yaml.MapItem{"post", m.Post.ToRawInfo()}) + } + // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Delete != nil { + info = append(info, yaml.MapItem{"delete", m.Delete.ToRawInfo()}) + } + // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Options != nil { + info = append(info, yaml.MapItem{"options", m.Options.ToRawInfo()}) + } + // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Head != nil { + info = append(info, yaml.MapItem{"head", m.Head.ToRawInfo()}) + } + // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Patch != nil { + info = append(info, yaml.MapItem{"patch", m.Patch.ToRawInfo()}) + } + // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"parameters", items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. 
+func (m *PathParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Paths suitable for JSON or YAML export. +func (m *Paths) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + if m.Path != nil { + for _, item := range m.Path { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. 
+func (m *PrimitivesItems) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Properties suitable for JSON or YAML export. +func (m *Properties) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. 
+func (m *QueryParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Response suitable for JSON or YAML export. 
+func (m *Response) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Schema != nil { + info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + } + // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Headers != nil { + info = append(info, yaml.MapItem{"headers", m.Headers.ToRawInfo()}) + } + // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Examples != nil { + info = append(info, yaml.MapItem{"examples", m.Examples.ToRawInfo()}) + } + // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. +func (m *ResponseDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. +func (m *ResponseValue) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ResponseValue + // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetResponse() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Responses suitable for JSON or YAML export. +func (m *Responses) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.ResponseCode != nil { + for _, item := range m.ResponseCode { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Schema suitable for JSON or YAML export. 
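+// Scalar fields are emitted only when they differ from their Go zero values,
+// so a source document that explicitly sets, for example, "minimum: 0" or
+// "readOnly: false" loses those entries on round-trip export.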
+func (m *Schema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.XRef != "" {
+		info = append(info, yaml.MapItem{"$ref", m.XRef})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{"format", m.Format})
+	}
+	if m.Title != "" {
+		info = append(info, yaml.MapItem{"title", m.Title})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf})
+	}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{"maximum", m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{"minimum", m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{"maxLength", m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{"minLength", m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{"pattern", m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{"maxItems", m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{"minItems", m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems})
+	}
+	if m.MaxProperties != 0 {
+		info = append(info, yaml.MapItem{"maxProperties", m.MaxProperties})
+	}
+	if m.MinProperties != 0 {
+		info = append(info, yaml.MapItem{"minProperties", m.MinProperties})
+	}
+	if len(m.Required) != 0 {
+		info = append(info, yaml.MapItem{"required", m.Required})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{"enum", items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.AdditionalProperties != nil {
+		info = append(info, yaml.MapItem{"additionalProperties", m.AdditionalProperties.ToRawInfo()})
+	}
+	// &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Type != nil {
+		if len(m.Type.Value) == 1 {
+			info = append(info, yaml.MapItem{"type", m.Type.Value[0]})
+		} else {
+			info = append(info, yaml.MapItem{"type", m.Type.Value})
+		}
+	}
+	// &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Items != nil {
+		items := make([]interface{}, 0)
+		for _, item := range m.Items.Schema {
+			items = append(items, item.ToRawInfo())
+		}
+		// OpenAPI v2 "items" holds a single schema, so only the first entry is
+		// emitted; guard against an empty list to avoid indexing items[0].
+		if len(items) > 0 {
+			info = append(info, yaml.MapItem{"items", items[0]})
+		}
+	}
+	// &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.AllOf) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.AllOf {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{"allOf", items})
+	}
+	// &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.Properties != nil {
+		info = append(info, yaml.MapItem{"properties", m.Properties.ToRawInfo()})
+	}
+ // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Discriminator != "" { + info = append(info, yaml.MapItem{"discriminator", m.Discriminator}) + } + if m.ReadOnly != false { + info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + } + if m.Xml != nil { + info = append(info, yaml.MapItem{"xml", m.Xml.ToRawInfo()}) + } + // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Example != nil { + info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + } + // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. +func (m *SchemaItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SchemaItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFileSchema() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. +func (m *SecurityDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. 
+func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SecurityDefinitionsItem + // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBasicAuthenticationSecurity() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetApiKeySecurity() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetOauth2ImplicitSecurity() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetOauth2PasswordSecurity() + if v3 != nil { + return v3.ToRawInfo() + } + // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v4 := m.GetOauth2ApplicationSecurity() + if v4 != nil { + return v4.ToRawInfo() + } + // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v5 := m.GetOauth2AccessCodeSecurity() + if v5 != nil { + return v5.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. +func (m *SecurityRequirement) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. +func (m *StringArray) ToRawInfo() interface{} { + return m.Value +} + +// ToRawInfo returns a description of Tag suitable for JSON or YAML export. +func (m *Tag) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. +func (m *TypeItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if len(m.Value) != 0 { + info = append(info, yaml.MapItem{"value", m.Value}) + } + return info +} + +// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. 
+func (m *VendorExtension) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Xml suitable for JSON or YAML export. +func (m *Xml) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Namespace != "" { + info = append(info, yaml.MapItem{"namespace", m.Namespace}) + } + if m.Prefix != "" { + info = append(info, yaml.MapItem{"prefix", m.Prefix}) + } + if m.Attribute != false { + info = append(info, yaml.MapItem{"attribute", m.Attribute}) + } + if m.Wrapped != false { + info = append(info, yaml.MapItem{"wrapped", m.Wrapped}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +var ( + pattern0 = regexp.MustCompile("^x-") + pattern1 = regexp.MustCompile("^/") + pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") +) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go new file mode 100644 index 00000000..37da7df2 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -0,0 +1,4456 @@ +// Code generated by protoc-gen-go. +// source: OpenAPIv2/OpenAPIv2.proto +// DO NOT EDIT! + +/* +Package openapi_v2 is a generated protocol buffer package. + +It is generated from these files: + OpenAPIv2/OpenAPIv2.proto + +It has these top-level messages: + AdditionalPropertiesItem + Any + ApiKeySecurity + BasicAuthenticationSecurity + BodyParameter + Contact + Default + Definitions + Document + Examples + ExternalDocs + FileSchema + FormDataParameterSubSchema + Header + HeaderParameterSubSchema + Headers + Info + ItemsItem + JsonReference + License + NamedAny + NamedHeader + NamedParameter + NamedPathItem + NamedResponse + NamedResponseValue + NamedSchema + NamedSecurityDefinitionsItem + NamedString + NamedStringArray + NonBodyParameter + Oauth2AccessCodeSecurity + Oauth2ApplicationSecurity + Oauth2ImplicitSecurity + Oauth2PasswordSecurity + Oauth2Scopes + Operation + Parameter + ParameterDefinitions + ParametersItem + PathItem + PathParameterSubSchema + Paths + PrimitivesItems + Properties + QueryParameterSubSchema + Response + ResponseDefinitions + ResponseValue + Responses + Schema + SchemaItem + SecurityDefinitions + SecurityDefinitionsItem + SecurityRequirement + StringArray + Tag + TypeItem + VendorExtension + Xml +*/ +package openapi_v2 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AdditionalPropertiesItem struct { + // Types that are valid to be assigned to Oneof: + // *AdditionalPropertiesItem_Schema + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } +func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } +func (*AdditionalPropertiesItem) ProtoMessage() {} +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` +} + +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *AdditionalPropertiesItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } +} + +func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *AdditionalPropertiesItem_Boolean: + t := uint64(0) + if x.Boolean { + t = 1 + } + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AdditionalPropertiesItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &AdditionalPropertiesItem_Schema{msg} + return true, err + case 2: // oneof.boolean + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} + return true, err + default: + return false, nil + } +} + +func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: 
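+		// Size of an embedded message on the wire: the field key varint
+		// (field_number<<3 | wire_type), then the length varint, then the
+		// encoded payload itself, as computed by the lines below.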
+ s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AdditionalPropertiesItem_Boolean: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Any struct { + Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Any) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +func (m *Any) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +type ApiKeySecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } +func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } +func (*ApiKeySecurity) ProtoMessage() {} +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ApiKeySecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ApiKeySecurity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ApiKeySecurity) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *ApiKeySecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BasicAuthenticationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } +func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } +func (*BasicAuthenticationSecurity) ProtoMessage() {} +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *BasicAuthenticationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BodyParameter struct { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BodyParameter) Reset() { *m = BodyParameter{} } +func (m *BodyParameter) String() string { return proto.CompactTextString(m) } +func (*BodyParameter) ProtoMessage() {} +func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *BodyParameter) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BodyParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BodyParameter) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *BodyParameter) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *BodyParameter) GetSchema() *Schema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *BodyParameter) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Contact information for the owners of the API. +type Contact struct { + // The identifying name of the contact person/organization. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the contact information. + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + // The email address of the contact person/organization. + Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Contact) Reset() { *m = Contact{} } +func (m *Contact) String() string { return proto.CompactTextString(m) } +func (*Contact) ProtoMessage() {} +func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Contact) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Contact) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *Contact) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *Contact) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Default struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Default) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. 
+type Definitions struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Definitions) Reset() { *m = Definitions{} } +func (m *Definitions) String() string { return proto.CompactTextString(m) } +func (*Definitions) ProtoMessage() {} +func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Definitions) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Document struct { + // The Swagger version of this document. + Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + // The host (name or ip) of the API. Example: 'swagger.io' + Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` + // The base path to the API. Example: '/api'. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` + // A list of MIME types accepted by the API. + Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *Document) GetSwagger() string { + if m != nil { + return m.Swagger + } + return "" +} + +func (m *Document) GetInfo() *Info { + if m != nil { + return m.Info + } + return nil +} + +func (m *Document) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Document) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +func (m *Document) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Document) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Document) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Document) GetPaths() *Paths { + if m != nil { + return m.Paths + } + return nil +} + +func (m *Document) GetDefinitions() *Definitions { + if m != nil { + return m.Definitions + } 
+ return nil +} + +func (m *Document) GetParameters() *ParameterDefinitions { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Document) GetResponses() *ResponseDefinitions { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Document) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { + if m != nil { + return m.SecurityDefinitions + } + return nil +} + +func (m *Document) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Document) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Document) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Examples struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Examples) Reset() { *m = Examples{} } +func (m *Examples) String() string { return proto.CompactTextString(m) } +func (*Examples) ProtoMessage() {} +func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *Examples) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// information about external documentation +type ExternalDocs struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } +func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } +func (*ExternalDocs) ProtoMessage() {} +func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ExternalDocs) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ExternalDocs) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ExternalDocs) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. 
+type FileSchema struct { + Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FileSchema) Reset() { *m = FileSchema{} } +func (m *FileSchema) String() string { return proto.CompactTextString(m) } +func (*FileSchema) ProtoMessage() {} +func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *FileSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FileSchema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *FileSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FileSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FileSchema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *FileSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FileSchema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +func (m *FileSchema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *FileSchema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *FileSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type FormDataParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. 
+ AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } +func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*FormDataParameterSubSchema) ProtoMessage() {} +func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *FormDataParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *FormDataParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FormDataParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *FormDataParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FormDataParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *FormDataParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 
+} + +func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *FormDataParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *FormDataParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Header struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *Header) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Header) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Header) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *Header) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *Header) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Header) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Header) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Header) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Header) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Header) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Header) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Header) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Header) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Header) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Header) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Header) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Header) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Header) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Header) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type HeaderParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } +func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*HeaderParameterSubSchema) ProtoMessage() {} +func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *HeaderParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *HeaderParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *HeaderParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *HeaderParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *HeaderParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *HeaderParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMinimum() float64 { + if m != nil { + 
return m.Minimum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *HeaderParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *HeaderParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Headers struct { + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Headers) Reset() { *m = Headers{} } +func (m *Headers) String() string { return proto.CompactTextString(m) } +func (*Headers) ProtoMessage() {} +func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *Headers) GetAdditionalProperties() []*NamedHeader { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// General information about the API. +type Info struct { + // A unique and precise title of the API. + Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` + // A semantic version number of the API. + Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The terms of service for the API. 
+ TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *Info) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Info) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Info) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Info) GetTermsOfService() string { + if m != nil { + return m.TermsOfService + } + return "" +} + +func (m *Info) GetContact() *Contact { + if m != nil { + return m.Contact + } + return nil +} + +func (m *Info) GetLicense() *License { + if m != nil { + return m.License + } + return nil +} + +func (m *Info) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type ItemsItem struct { + Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"` +} + +func (m *ItemsItem) Reset() { *m = ItemsItem{} } +func (m *ItemsItem) String() string { return proto.CompactTextString(m) } +func (*ItemsItem) ProtoMessage() {} +func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *ItemsItem) GetSchema() []*Schema { + if m != nil { + return m.Schema + } + return nil +} + +type JsonReference struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *JsonReference) Reset() { *m = JsonReference{} } +func (m *JsonReference) String() string { return proto.CompactTextString(m) } +func (*JsonReference) ProtoMessage() {} +func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *JsonReference) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *JsonReference) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type License struct { + // The name of the license type. It's encouraged to use an OSI compatible license. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the license. 
+ Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *License) Reset() { *m = License{} } +func (m *License) String() string { return proto.CompactTextString(m) } +func (*License) ProtoMessage() {} +func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *License) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *License) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *License) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +type NamedAny struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedAny) Reset() { *m = NamedAny{} } +func (m *NamedAny) String() string { return proto.CompactTextString(m) } +func (*NamedAny) ProtoMessage() {} +func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *NamedAny) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedAny) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +type NamedHeader struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedHeader) Reset() { *m = NamedHeader{} } +func (m *NamedHeader) String() string { return proto.CompactTextString(m) } +func (*NamedHeader) ProtoMessage() {} +func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *NamedHeader) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedHeader) GetValue() *Header { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +type NamedParameter struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedParameter) Reset() { *m = NamedParameter{} } +func (m *NamedParameter) String() string { return proto.CompactTextString(m) } +func (*NamedParameter) ProtoMessage() {} +func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *NamedParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedParameter) GetValue() *Parameter { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+type NamedPathItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } +func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } +func (*NamedPathItem) ProtoMessage() {} +func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *NamedPathItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedPathItem) GetValue() *PathItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +type NamedResponse struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponse) Reset() { *m = NamedResponse{} } +func (m *NamedResponse) String() string { return proto.CompactTextString(m) } +func (*NamedResponse) ProtoMessage() {} +func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *NamedResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponse) GetValue() *Response { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +type NamedResponseValue struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } +func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } +func (*NamedResponseValue) ProtoMessage() {} +func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *NamedResponseValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponseValue) GetValue() *ResponseValue { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +type NamedSchema struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSchema) Reset() { *m = NamedSchema{} } +func (m *NamedSchema) String() string { return proto.CompactTextString(m) } +func (*NamedSchema) ProtoMessage() {} +func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *NamedSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSchema) GetValue() *Schema { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
+type NamedSecurityDefinitionsItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } +func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *NamedSecurityDefinitionsItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +type NamedString struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedString) Reset() { *m = NamedString{} } +func (m *NamedString) String() string { return proto.CompactTextString(m) } +func (*NamedString) ProtoMessage() {} +func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *NamedString) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +type NamedStringArray struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } +func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } +func (*NamedStringArray) ProtoMessage() {} +func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *NamedStringArray) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedStringArray) GetValue() *StringArray { + if m != nil { + return m.Value + } + return nil +} + +type NonBodyParameter struct { + // Types that are valid to be assigned to Oneof: + // *NonBodyParameter_HeaderParameterSubSchema + // *NonBodyParameter_FormDataParameterSubSchema + // *NonBodyParameter_QueryParameterSubSchema + // *NonBodyParameter_PathParameterSubSchema + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } +func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } +func (*NonBodyParameter) ProtoMessage() {} +func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() +} + +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"` +} +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"` +} +type 
NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"` +} +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"` +} + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { + return x.HeaderParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { + return x.FormDataParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { + return x.QueryParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { + return x.PathParameterSubSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } +} + +func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_FormDataParameterSubSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_QueryParameterSubSchema: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_PathParameterSubSchema: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NonBodyParameter) + switch tag { + case 1: // oneof.header_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HeaderParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg} + return true, err + case 2: // oneof.form_data_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FormDataParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg} + return true, err + case 3: // oneof.query_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg} + return true, err + case 4: // oneof.path_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PathParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg} + return true, err + default: + return false, nil + } +} + +func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + s := proto.Size(x.HeaderParameterSubSchema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_FormDataParameterSubSchema: + s := proto.Size(x.FormDataParameterSubSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_QueryParameterSubSchema: + s := proto.Size(x.QueryParameterSubSchema) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*NonBodyParameter_PathParameterSubSchema: + s := proto.Size(x.PathParameterSubSchema) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oauth2AccessCodeSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } +func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *Oauth2AccessCodeSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ApplicationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } +func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ApplicationSecurity) ProtoMessage() {} +func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *Oauth2ApplicationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} 
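(Editor's aside, not part of the vendored file.) The oneof machinery above is easier to see from the caller's side. A minimal sketch, assuming the vendored package is imported as `openapi` from `github.com/googleapis/gnostic/OpenAPIv2` (import path assumed): selecting a oneof case means assigning one of the generated wrapper structs, and reading it goes through the generated getters, which tolerate both a nil receiver and an unset oneof.

```
package main

import (
	"fmt"

	openapi "github.com/googleapis/gnostic/OpenAPIv2" // assumed import path
)

// describe shows the two sides of the generated oneof API: the wrapper
// struct (Parameter_BodyParameter) selects the case, and the getter
// (GetBodyParameter) safely unpacks it.
func describe(p *openapi.Parameter) {
	if bp := p.GetBodyParameter(); bp != nil {
		fmt.Printf("body parameter %q (required: %v)\n", bp.GetName(), bp.GetRequired())
		return
	}
	// Like every generated getter, GetNonBodyParameter returns the zero
	// value when the oneof is unset, so no extra nil checks are needed.
	fmt.Println("not a body parameter:", p.GetNonBodyParameter())
}

func main() {
	describe(&openapi.Parameter{
		Oneof: &openapi.Parameter_BodyParameter{
			BodyParameter: &openapi.BodyParameter{Name: "payload", In: "body", Required: true},
		},
	})
	describe(&openapi.Parameter{}) // unset oneof
	describe(nil)                  // nil receiver is also safe
}
```

The wrapper-struct design is what lets the XXX_OneofFuncs marshalers above dispatch on the concrete type with a plain type switch.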
+ +func (m *Oauth2ApplicationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ImplicitSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } +func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ImplicitSecurity) ProtoMessage() {} +func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *Oauth2ImplicitSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2PasswordSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } +func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2PasswordSecurity) ProtoMessage() {} +func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *Oauth2PasswordSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2PasswordSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2Scopes struct { + 
AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } +func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } +func (*Oauth2Scopes) ProtoMessage() {} +func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Operation struct { + Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` + // A brief summary of the operation. + Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + // A longer description of the operation, GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + // A unique identifier of the operation. + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` + // A list of MIME types the API can consume. + Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` + // The parameters needed to send a valid API call. + Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` + Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *Operation) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Operation) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Operation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Operation) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Operation) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *Operation) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Operation) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Operation) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Operation) GetResponses() *Responses { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Operation) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Operation) GetDeprecated() bool { + if m != nil { + 
return m.Deprecated + } + return false +} + +func (m *Operation) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Operation) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Parameter struct { + // Types that are valid to be assigned to Oneof: + // *Parameter_BodyParameter + // *Parameter_NonBodyParameter + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *Parameter) Reset() { *m = Parameter{} } +func (m *Parameter) String() string { return proto.CompactTextString(m) } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type isParameter_Oneof interface { + isParameter_Oneof() +} + +type Parameter_BodyParameter struct { + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` +} +type Parameter_NonBodyParameter struct { + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` +} + +func (*Parameter_BodyParameter) isParameter_Oneof() {} +func (*Parameter_NonBodyParameter) isParameter_Oneof() {} + +func (m *Parameter) GetOneof() isParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *Parameter) GetBodyParameter() *BodyParameter { + if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { + return x.BodyParameter + } + return nil +} + +func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { + if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { + return x.NonBodyParameter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } +} + +func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BodyParameter); err != nil { + return err + } + case *Parameter_NonBodyParameter: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NonBodyParameter); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Parameter) + switch tag { + case 1: // oneof.body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_BodyParameter{msg} + return true, err + case 2: // oneof.non_body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NonBodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_NonBodyParameter{msg} + return true, err + default: + return false, nil + } +} + +func _Parameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + s := proto.Size(x.BodyParameter) + n += proto.SizeVarint(1<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Parameter_NonBodyParameter: + s := proto.Size(x.NonBodyParameter) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// One or more JSON representations for parameters +type ParameterDefinitions struct { + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } +func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } +func (*ParameterDefinitions) ProtoMessage() {} +func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ParametersItem struct { + // Types that are valid to be assigned to Oneof: + // *ParametersItem_Parameter + // *ParametersItem_JsonReference + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ParametersItem) Reset() { *m = ParametersItem{} } +func (m *ParametersItem) String() string { return proto.CompactTextString(m) } +func (*ParametersItem) ProtoMessage() {} +func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type isParametersItem_Oneof interface { + isParametersItem_Oneof() +} + +type ParametersItem_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` +} +type ParametersItem_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} +func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} + +func (m *ParametersItem) GetOneof() isParametersItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ParametersItem) GetParameter() *Parameter { + if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { + return x.Parameter + } + return nil +} + +func (m *ParametersItem) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } +} + +func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Parameter); err != nil { + return err + } + case *ParametersItem_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ParametersItem) + switch tag { + case 1: // oneof.parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Parameter) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_Parameter{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ParametersItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + s := proto.Size(x.Parameter) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ParametersItem_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type PathItem struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` + // The parameters needed to send a valid API call. 
+ Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathItem) Reset() { *m = PathItem{} } +func (m *PathItem) String() string { return proto.CompactTextString(m) } +func (*PathItem) ProtoMessage() {} +func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *PathItem) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *PathItem) GetGet() *Operation { + if m != nil { + return m.Get + } + return nil +} + +func (m *PathItem) GetPut() *Operation { + if m != nil { + return m.Put + } + return nil +} + +func (m *PathItem) GetPost() *Operation { + if m != nil { + return m.Post + } + return nil +} + +func (m *PathItem) GetDelete() *Operation { + if m != nil { + return m.Delete + } + return nil +} + +func (m *PathItem) GetOptions() *Operation { + if m != nil { + return m.Options + } + return nil +} + +func (m *PathItem) GetHead() *Operation { + if m != nil { + return m.Head + } + return nil +} + +func (m *PathItem) GetPatch() *Operation { + if m != nil { + return m.Patch + } + return nil +} + +func (m *PathItem) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *PathItem) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type PathParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } +func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*PathParameterSubSchema) ProtoMessage() {} +func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *PathParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *PathParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *PathParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PathParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PathParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PathParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PathParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PathParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PathParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PathParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + 
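+// Illustrative sketch, not part of the generated file: every Get* method in
+// this file guards against a nil receiver, so optional substructures can be
+// read without intermediate nil checks. The function name below is an
+// assumption made for this example only.
+func examplePathParameterRange(p *PathParameterSubSchema) (lo, hi float64) {
+	// Safe even when p is nil: the generated getters return zero values.
+	return p.GetMinimum(), p.GetMaximum()
+}
+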
+func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PathParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PathParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PathParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PathParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +type Paths struct { + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` +} + +func (m *Paths) Reset() { *m = Paths{} } +func (m *Paths) String() string { return proto.CompactTextString(m) } +func (*Paths) ProtoMessage() {} +func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func (m *Paths) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func (m *Paths) GetPath() []*NamedPathItem { + if m != nil { + return m.Path + } + return nil +} + +type PrimitivesItems struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } +func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } +func (*PrimitivesItems) ProtoMessage() {} +func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + +func (m *PrimitivesItems) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PrimitivesItems) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PrimitivesItems) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PrimitivesItems) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PrimitivesItems) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PrimitivesItems) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PrimitivesItems) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PrimitivesItems) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PrimitivesItems) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PrimitivesItems) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PrimitivesItems) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PrimitivesItems) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PrimitivesItems) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PrimitivesItems) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PrimitivesItems) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Properties struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Properties) Reset() { *m = Properties{} } +func (m *Properties) String() string { return proto.CompactTextString(m) } +func (*Properties) ProtoMessage() {} +func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } + +func (m *Properties) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type QueryParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } +func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*QueryParameterSubSchema) ProtoMessage() {} +func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + +func (m *QueryParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *QueryParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *QueryParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *QueryParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *QueryParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *QueryParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *QueryParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + 
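+// Illustrative sketch, not part of the generated file: the oneof messages
+// defined earlier (Parameter, ParametersItem, ResponseValue, SchemaItem, ...)
+// are populated by storing one of their generated wrapper structs in the
+// Oneof field and read back through the matching Get* helper. The function
+// name is an assumption made for this example only.
+func exampleWrapParameter(p *Parameter) *ParametersItem {
+	// Exactly one variant can occupy Oneof at a time; storing a
+	// ParametersItem_JsonReference here instead would make GetParameter
+	// return nil while GetJsonReference returned the reference.
+	return &ParametersItem{Oneof: &ParametersItem_Parameter{Parameter: p}}
+}
+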
+func (m *QueryParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *QueryParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *QueryParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *QueryParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *QueryParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *QueryParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *QueryParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Response struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } + +func (m *Response) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Response) GetSchema() *SchemaItem { + if m != nil { + return m.Schema + } + return nil +} + +func (m *Response) GetHeaders() *Headers { + if m != nil { + return m.Headers + } + return nil +} + +func (m *Response) GetExamples() *Examples { + if m != nil { + return m.Examples + } + return nil +} + +func (m *Response) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// One or more JSON representations for responses +type ResponseDefinitions struct { + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } +func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } +func (*ResponseDefinitions) ProtoMessage() {} +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } + +func (m *ResponseDefinitions) GetAdditionalProperties()
[]*NamedResponse { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ResponseValue struct { + // Types that are valid to be assigned to Oneof: + // *ResponseValue_Response + // *ResponseValue_JsonReference + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ResponseValue) Reset() { *m = ResponseValue{} } +func (m *ResponseValue) String() string { return proto.CompactTextString(m) } +func (*ResponseValue) ProtoMessage() {} +func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } + +type isResponseValue_Oneof interface { + isResponseValue_Oneof() +} + +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` +} +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ResponseValue_Response) isResponseValue_Oneof() {} +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +func (m *ResponseValue) GetOneof() isResponseValue_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ResponseValue) GetResponse() *Response { + if x, ok := m.GetOneof().(*ResponseValue_Response); ok { + return x.Response + } + return nil +} + +func (m *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } +} + +func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Response); err != nil { + return err + } + case *ResponseValue_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) + } + return nil +} + +func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseValue) + switch tag { + case 1: // oneof.response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Response) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_Response{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ResponseValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + s := proto.Size(x.Response) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseValue_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Response objects names can either be any valid HTTP status code or 'default'. +type Responses struct { + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Responses) Reset() { *m = Responses{} } +func (m *Responses) String() string { return proto.CompactTextString(m) } +func (*Responses) ProtoMessage() {} +func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } + +func (m *Responses) GetResponseCode() []*NamedResponseValue { + if m != nil { + return m.ResponseCode + } + return nil +} + +func (m *Responses) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type Schema struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` + Discriminator string 
`protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + +func (m *Schema) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *Schema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Schema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Schema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Schema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Schema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Schema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Schema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Schema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Schema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Schema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Schema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Schema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Schema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Schema) GetMaxProperties() int64 { + if m != nil { + return m.MaxProperties + } + return 0 +} + +func (m *Schema) GetMinProperties() int64 { + if m != nil { + return m.MinProperties + } + return 0 +} + +func (m *Schema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *Schema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +func (m *Schema) GetType() *TypeItem { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema) GetItems() *ItemsItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *Schema) GetAllOf() []*Schema { + if m != nil { + return m.AllOf + } + return nil +} + +func (m *Schema) GetProperties() *Properties { + if m != nil { + return m.Properties + } + return nil +} + +func (m *Schema) GetDiscriminator() string { + if m != nil { + return m.Discriminator + } + return "" +} + +func (m *Schema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + 
+func (m *Schema) GetXml() *Xml { + if m != nil { + return m.Xml + } + return nil +} + +func (m *Schema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Schema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *Schema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type SchemaItem struct { + // Types that are valid to be assigned to Oneof: + // *SchemaItem_Schema + // *SchemaItem_FileSchema + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SchemaItem) Reset() { *m = SchemaItem{} } +func (m *SchemaItem) String() string { return proto.CompactTextString(m) } +func (*SchemaItem) ProtoMessage() {} +func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } + +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() +} + +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` +} + +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SchemaItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { + return x.FileSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } +} + +func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *SchemaItem_FileSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SchemaItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_Schema{msg} + return true, err + case 2: // oneof.file_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileSchema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_FileSchema{msg} + return true, err + default: + return false, nil + } +} + +func _SchemaItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*SchemaItem_FileSchema: + s := proto.Size(x.FileSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityDefinitions struct { + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } +func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitions) ProtoMessage() {} +func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } + +func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type SecurityDefinitionsItem struct { + // Types that are valid to be assigned to Oneof: + // *SecurityDefinitionsItem_BasicAuthenticationSecurity + // *SecurityDefinitionsItem_ApiKeySecurity + // *SecurityDefinitionsItem_Oauth2ImplicitSecurity + // *SecurityDefinitionsItem_Oauth2PasswordSecurity + // *SecurityDefinitionsItem_Oauth2ApplicationSecurity + // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } +func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitionsItem) ProtoMessage() {} +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } + +type isSecurityDefinitionsItem_Oneof interface { + isSecurityDefinitionsItem_Oneof() +} + +type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { + BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` +} +type SecurityDefinitionsItem_ApiKeySecurity struct { + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` +} + +func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} +func 
(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } +} + +func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_ApiKeySecurity: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + b.EncodeVarint(6<<3 | 
proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SecurityDefinitionsItem) + switch tag { + case 1: // oneof.basic_authentication_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BasicAuthenticationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} + return true, err + case 2: // oneof.api_key_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ApiKeySecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} + return true, err + case 3: // oneof.oauth2_implicit_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ImplicitSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} + return true, err + case 4: // oneof.oauth2_password_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2PasswordSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} + return true, err + case 5: // oneof.oauth2_application_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ApplicationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} + return true, err + case 6: // oneof.oauth2_access_code_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2AccessCodeSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} + return true, err + default: + return false, nil + } +} + +func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + s := proto.Size(x.BasicAuthenticationSecurity) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_ApiKeySecurity: + s := proto.Size(x.ApiKeySecurity) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + s := proto.Size(x.Oauth2ImplicitSecurity) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + s := proto.Size(x.Oauth2PasswordSecurity) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + s := proto.Size(x.Oauth2ApplicationSecurity) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + s := proto.Size(x.Oauth2AccessCodeSecurity) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityRequirement struct { + 
AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } +func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } +func (*SecurityRequirement) ProtoMessage() {} +func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } + +func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type StringArray struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *StringArray) Reset() { *m = StringArray{} } +func (m *StringArray) String() string { return proto.CompactTextString(m) } +func (*StringArray) ProtoMessage() {} +func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } + +func (m *StringArray) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Tag struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } + +func (m *Tag) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Tag) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Tag) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Tag) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type TypeItem struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *TypeItem) Reset() { *m = TypeItem{} } +func (m *TypeItem) String() string { return proto.CompactTextString(m) } +func (*TypeItem) ProtoMessage() {} +func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } + +func (m *TypeItem) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +// Any property starting with x- is valid. 
+type VendorExtension struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *VendorExtension) Reset() { *m = VendorExtension{} } +func (m *VendorExtension) String() string { return proto.CompactTextString(m) } +func (*VendorExtension) ProtoMessage() {} +func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } + +func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Xml struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Xml) Reset() { *m = Xml{} } +func (m *Xml) String() string { return proto.CompactTextString(m) } +func (*Xml) ProtoMessage() {} +func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } + +func (m *Xml) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Xml) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *Xml) GetPrefix() string { + if m != nil { + return m.Prefix + } + return "" +} + +func (m *Xml) GetAttribute() bool { + if m != nil { + return m.Attribute + } + return false +} + +func (m *Xml) GetWrapped() bool { + if m != nil { + return m.Wrapped + } + return false +} + +func (m *Xml) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func init() { + proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") + proto.RegisterType((*Any)(nil), "openapi.v2.Any") + proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") + proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") + proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") + proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") + proto.RegisterType((*Default)(nil), "openapi.v2.Default") + proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") + proto.RegisterType((*Document)(nil), "openapi.v2.Document") + proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") + proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") + proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") + proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") + proto.RegisterType((*Header)(nil), "openapi.v2.Header") + proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") + proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") + proto.RegisterType((*Info)(nil), "openapi.v2.Info") + proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") + proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") + proto.RegisterType((*License)(nil), "openapi.v2.License") + proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") + proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") + proto.RegisterType((*NamedParameter)(nil), 
"openapi.v2.NamedParameter") + proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") + proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") + proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") + proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") + proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") + proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") + proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") + proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") + proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") + proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") + proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") + proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") + proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") + proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") + proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") + proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") + proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") + proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") + proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") + proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") + proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") + proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") + proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") + proto.RegisterType((*Response)(nil), "openapi.v2.Response") + proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") + proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") + proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") + proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") + proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") + proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") + proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") + proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") + proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") + proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") + proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") + proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") + proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") +} + +func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 3129 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, + 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, + 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, + 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, + 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, + 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d, + 
0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7, + 0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7, + 0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f, + 0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c, + 0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c, + 0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc, + 0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01, + 0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2, + 0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5, + 0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23, + 0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51, + 0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2, + 0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5, + 0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c, + 0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89, + 0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f, + 0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d, + 0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c, + 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e, + 0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1, + 0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55, + 0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87, + 0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4, + 0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30, + 0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6, + 0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17, + 0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4, + 0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74, + 0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2, + 0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4, + 0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0, + 0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa, + 0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61, + 0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e, + 0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b, + 0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0, + 0x03, 0x5b, 0x79, 0x25, 
0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7, + 0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5, + 0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e, + 0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04, + 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d, + 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d, + 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13, + 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18, + 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9, + 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73, + 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a, + 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9, + 0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8, + 0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19, + 0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9, + 0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60, + 0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29, + 0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92, + 0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc, + 0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a, + 0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe, + 0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6, + 0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8, + 0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90, + 0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf, + 0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64, + 0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0, + 0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d, + 0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18, + 0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4, + 0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58, + 0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21, + 0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d, + 0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9, + 0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67, + 0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17, + 0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 
0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27, + 0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5, + 0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a, + 0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4, + 0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e, + 0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2, + 0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87, + 0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38, + 0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93, + 0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0, + 0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b, + 0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2, + 0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86, + 0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c, + 0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02, + 0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3, + 0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3, + 0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e, + 0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7, + 0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94, + 0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34, + 0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a, + 0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8, + 0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b, + 0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5, + 0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e, + 0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89, + 0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6, + 0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c, + 0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14, + 0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41, + 0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7, + 0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d, + 0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc, + 0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7, + 0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3, + 0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 
0x9a, 0x35, 0xd1, 0x69, + 0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36, + 0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e, + 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c, + 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab, + 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33, + 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07, + 0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f, + 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f, + 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b, + 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc, + 0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e, + 0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad, + 0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16, + 0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89, + 0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45, + 0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8, + 0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b, + 0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08, + 0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93, + 0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7, + 0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb, + 0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74, + 0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41, + 0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20, + 0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a, + 0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94, + 0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e, + 0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4, + 0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6, + 0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36, + 0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a, + 0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58, + 0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46, + 0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88, + 0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c, + 0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81, + 
0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61, + 0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4, + 0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea, + 0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf, + 0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b, + 0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0, + 0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4, + 0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5, + 0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44, + 0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f, + 0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36, + 0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd, + 0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e, + 0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb, + 0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d, + 0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77, + 0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9, + 0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5, + 0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4, + 0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6, + 0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f, + 0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb, + 0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7, + 0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66, + 0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9, + 0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d, + 0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2, + 0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c, + 0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2, + 0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62, + 0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7, + 0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1, + 0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21, + 0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e, + 0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6, + 0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe, + 0xb1, 0x5b, 0xc6, 0xb3, 
0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c, + 0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae, + 0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e, + 0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1, + 0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8, + 0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47, + 0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf, + 0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, + 0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto new file mode 100644 index 00000000..2f336b3e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto @@ -0,0 +1,662 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v2; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v2"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +message AdditionalPropertiesItem { + oneof oneof { + Schema schema = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message ApiKeySecurity { + string type = 1; + string name = 2; + string in = 3; + string description = 4; + repeated NamedAny vendor_extension = 5; +} + +message BasicAuthenticationSecurity { + string type = 1; + string description = 2; + repeated NamedAny vendor_extension = 3; +} + +message BodyParameter { + // A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed. + string description = 1; + // The name of the parameter. + string name = 2; + // Determines the location of the parameter. + string in = 3; + // Determines whether or not this parameter is required or optional. + bool required = 4; + Schema schema = 5; + repeated NamedAny vendor_extension = 6; +} + +// Contact information for the owners of the API. +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. + string url = 2; + // The email address of the contact person/organization. + string email = 3; + repeated NamedAny vendor_extension = 4; +} + +message Default { + repeated NamedAny additional_properties = 1; +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +message Definitions { + repeated NamedSchema additional_properties = 1; +} + +message Document { + // The Swagger version of this document. + string swagger = 1; + Info info = 2; + // The host (name or ip) of the API. Example: 'swagger.io' + string host = 3; + // The base path to the API. Example: '/api'. + string base_path = 4; + // The transfer protocol of the API. + repeated string schemes = 5; + // A list of MIME types accepted by the API. + repeated string consumes = 6; + // A list of MIME types the API can produce. + repeated string produces = 7; + Paths paths = 8; + Definitions definitions = 9; + ParameterDefinitions parameters = 10; + ResponseDefinitions responses = 11; + repeated SecurityRequirement security = 12; + SecurityDefinitions security_definitions = 13; + repeated Tag tags = 14; + ExternalDocs external_docs = 15; + repeated NamedAny vendor_extension = 16; +} + +message Examples { + repeated NamedAny additional_properties = 1; +} + +// information about external documentation +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// A deterministic version of a JSON Schema object. +message FileSchema { + string format = 1; + string title = 2; + string description = 3; + Any default = 4; + repeated string required = 5; + string type = 6; + bool read_only = 7; + ExternalDocs external_docs = 8; + Any example = 9; + repeated NamedAny vendor_extension = 10; +} + +message FormDataParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Header { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + string description = 18; + repeated NamedAny vendor_extension = 19; +} + +message HeaderParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +message Headers { + repeated NamedHeader additional_properties = 1; +} + +// General information about the API. +message Info { + // A unique and precise title of the API. + string title = 1; + // A semantic version number of the API. + string version = 2; + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + string description = 3; + // The terms of service for the API. + string terms_of_service = 4; + Contact contact = 5; + License license = 6; + repeated NamedAny vendor_extension = 7; +} + +message ItemsItem { + repeated Schema schema = 1; +} + +message JsonReference { + string _ref = 1; + string description = 2; +} + +message License { + // The name of the license type. It's encouraged to use an OSI compatible license. + string name = 1; + // The URL pointing to the license. + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +message NamedHeader { + // Map key + string name = 1; + // Mapped value + Header value = 2; +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +message NamedParameter { + // Map key + string name = 1; + // Mapped value + Parameter value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
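A note on the `Named*` wrapper messages defined above: they stand in for proto3 map fields because OpenAPI tooling cares about key order, and a repeated list of (name, value) pairs preserves it where a map would not. The sketch below illustrates the pattern with the generated Go types; the import path and generated field names follow standard protoc-gen-go conventions and are assumptions here, not part of this file.

```
package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
)

func main() {
	// Vendor extensions as an order-preserving list of (name, value) pairs.
	extensions := []*openapi_v2.NamedAny{
		{Name: "x-first", Value: &openapi_v2.Any{Yaml: "1"}},
		{Name: "x-second", Value: &openapi_v2.Any{Yaml: "2"}},
	}
	for _, e := range extensions {
		fmt.Println(e.Name, e.Value.Yaml) // iteration preserves insertion order
	}
}
```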
+message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +message NamedResponse { + // Map key + string name = 1; + // Mapped value + Response value = 2; +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +message NamedResponseValue { + // Map key + string name = 1; + // Mapped value + ResponseValue value = 2; +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +message NamedSchema { + // Map key + string name = 1; + // Mapped value + Schema value = 2; +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. +message NamedSecurityDefinitionsItem { + // Map key + string name = 1; + // Mapped value + SecurityDefinitionsItem value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +message NonBodyParameter { + oneof oneof { + HeaderParameterSubSchema header_parameter_sub_schema = 1; + FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + QueryParameterSubSchema query_parameter_sub_schema = 3; + PathParameterSubSchema path_parameter_sub_schema = 4; + } +} + +message Oauth2AccessCodeSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string token_url = 5; + string description = 6; + repeated NamedAny vendor_extension = 7; +} + +message Oauth2ApplicationSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2ImplicitSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2PasswordSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2Scopes { + repeated NamedString additional_properties = 1; +} + +message Operation { + repeated string tags = 1; + // A brief summary of the operation. + string summary = 2; + // A longer description of the operation, GitHub Flavored Markdown is allowed. + string description = 3; + ExternalDocs external_docs = 4; + // A unique identifier of the operation. + string operation_id = 5; + // A list of MIME types the API can produce. + repeated string produces = 6; + // A list of MIME types the API can consume. + repeated string consumes = 7; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 8; + Responses responses = 9; + // The transfer protocol of the API. 
+ repeated string schemes = 10; + bool deprecated = 11; + repeated SecurityRequirement security = 12; + repeated NamedAny vendor_extension = 13; +} + +message Parameter { + oneof oneof { + BodyParameter body_parameter = 1; + NonBodyParameter non_body_parameter = 2; + } +} + +// One or more JSON representations for parameters +message ParameterDefinitions { + repeated NamedParameter additional_properties = 1; +} + +message ParametersItem { + oneof oneof { + Parameter parameter = 1; + JsonReference json_reference = 2; + } +} + +message PathItem { + string _ref = 1; + Operation get = 2; + Operation put = 3; + Operation post = 4; + Operation delete = 5; + Operation options = 6; + Operation head = 7; + Operation patch = 8; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 9; + repeated NamedAny vendor_extension = 10; +} + +message PathParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +message Paths { + repeated NamedAny vendor_extension = 1; + repeated NamedPathItem path = 2; +} + +message PrimitivesItems { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + repeated NamedAny vendor_extension = 18; +} + +message Properties { + repeated NamedSchema additional_properties = 1; +} + +message QueryParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Response { + string description = 1; + SchemaItem schema = 2; + Headers headers = 3; + Examples examples = 4; + repeated NamedAny vendor_extension = 5; +} + +// One or more JSON representations for parameters +message ResponseDefinitions { + repeated NamedResponse additional_properties = 1; +} + +message ResponseValue { + oneof oneof { + Response response = 1; + JsonReference json_reference = 2; + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. +message Responses { + repeated NamedResponseValue response_code = 1; + repeated NamedAny vendor_extension = 2; +} + +// A deterministic version of a JSON Schema object. +message Schema { + string _ref = 1; + string format = 2; + string title = 3; + string description = 4; + Any default = 5; + double multiple_of = 6; + double maximum = 7; + bool exclusive_maximum = 8; + double minimum = 9; + bool exclusive_minimum = 10; + int64 max_length = 11; + int64 min_length = 12; + string pattern = 13; + int64 max_items = 14; + int64 min_items = 15; + bool unique_items = 16; + int64 max_properties = 17; + int64 min_properties = 18; + repeated string required = 19; + repeated Any enum = 20; + AdditionalPropertiesItem additional_properties = 21; + TypeItem type = 22; + ItemsItem items = 23; + repeated Schema all_of = 24; + Properties properties = 25; + string discriminator = 26; + bool read_only = 27; + Xml xml = 28; + ExternalDocs external_docs = 29; + Any example = 30; + repeated NamedAny vendor_extension = 31; +} + +message SchemaItem { + oneof oneof { + Schema schema = 1; + FileSchema file_schema = 2; + } +} + +message SecurityDefinitions { + repeated NamedSecurityDefinitionsItem additional_properties = 1; +} + +message SecurityDefinitionsItem { + oneof oneof { + BasicAuthenticationSecurity basic_authentication_security = 1; + ApiKeySecurity api_key_security = 2; + Oauth2ImplicitSecurity oauth2_implicit_security = 3; + Oauth2PasswordSecurity oauth2_password_security = 4; + Oauth2ApplicationSecurity oauth2_application_security = 5; + Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + } +} + +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +message StringArray { + repeated string value = 1; +} + +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny vendor_extension = 4; +} + +message TypeItem { + repeated string value = 1; +} + +// Any property starting with x- is valid. 
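The `Responses`, `ResponseValue`, `SchemaItem`, and `Schema` messages above compose through `oneof` fields. Below is a minimal sketch of building a "200" response with the generated Go types; the `Oneof` field and the `ResponseValue_Response`/`SchemaItem_Schema` wrapper structs follow the usual protoc-gen-go naming rules and are assumptions, not guaranteed by this file.

```
package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
)

func main() {
	// A "200" response whose body is a plain string.
	responses := &openapi_v2.Responses{
		ResponseCode: []*openapi_v2.NamedResponseValue{{
			Name: "200",
			Value: &openapi_v2.ResponseValue{
				// ResponseValue is a oneof: an inline Response
				// or a JsonReference to a shared definition.
				Oneof: &openapi_v2.ResponseValue_Response{
					Response: &openapi_v2.Response{
						Description: "OK",
						Schema: &openapi_v2.SchemaItem{
							Oneof: &openapi_v2.SchemaItem_Schema{
								Schema: &openapi_v2.Schema{
									Type: &openapi_v2.TypeItem{Value: []string{"string"}},
								},
							},
						},
					},
				},
			},
		}},
	}
	fmt.Println(len(responses.ResponseCode)) // 1
}
```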
+message VendorExtension { + repeated NamedAny additional_properties = 1; +} + +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny vendor_extension = 6; +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md new file mode 100644 index 00000000..1131b6f1 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md @@ -0,0 +1,16 @@ +# OpenAPI v2 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model +and related code for supporting OpenAPI v2. + +Gnostic applications and plugins can use OpenAPIv2.proto +to generate Protocol Buffer support code for their preferred languages. + +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI +descriptions into the Protocol Buffer-based datastructures +generated from OpenAPIv2.proto. + +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic +compiler generator, and OpenAPIv2.pb.go is generated by +protoc, the Protocol Buffer compiler, and protoc-gen-go, the +Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/README.md b/vendor/github.com/googleapis/gnostic/README.md new file mode 100644 index 00000000..98aaeaf5 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/README.md @@ -0,0 +1,103 @@ +[![Build Status](https://travis-ci.org/googleapis/gnostic.svg?branch=master)](https://travis-ci.org/googleapis/gnostic) + +# ⨁ gnostic + +This repository contains a Go command line tool which converts +JSON and YAML [OpenAPI](https://github.com/OAI/OpenAPI-Specification) +descriptions to and from equivalent Protocol Buffer representations. + +[Protocol Buffers](https://developers.google.com/protocol-buffers/) +provide a language-neutral, platform-neutral, extensible mechanism +for serializing structured data. +**gnostic**'s Protocol Buffer models for the OpenAPI Specification +can be used to generate code that includes data structures with +explicit fields for the elements of an OpenAPI description. +This makes it possible for developers to work with OpenAPI +descriptions in type-safe ways, which is particularly useful +in strongly-typed languages like Go and Swift. + +**gnostic** reads OpenAPI descriptions into +these generated data structures, reports errors, +resolves internal dependencies, and writes the results +in a binary form that can be used in any language that is +supported by the Protocol Buffer tools. +A plugin interface simplifies integration with API +tools written in a variety of different languages, +and when necessary, Protocol Buffer OpenAPI descriptions +can be reexported as JSON or YAML. + +**gnostic** compilation code and OpenAPI Protocol Buffer +models are automatically generated from an +[OpenAPI JSON Schema](https://github.com/OAI/OpenAPI-Specification/blob/master/schemas/v2.0/schema.json). +Source code for the generator is in the [generate-gnostic](generate-gnostic) directory. + +## Disclaimer + +This is prerelease software and work in progress. Feedback and +contributions are welcome, but we currently make no guarantees of +function or stability. + +## Requirements + +**gnostic** can be run in any environment that supports [Go](http://golang.org) +and the [Google Protocol Buffer Compiler](https://github.com/google/protobuf). + +## Installation + +1. Get this package by downloading it with `go get`. + + go get github.com/googleapis/gnostic + +2. 
[Optional] Build and run the compiler generator. +This uses the OpenAPI JSON schema to generate a Protocol Buffer language file +that describes the OpenAPI specification and a Go-language file of code that +will read a JSON or YAML OpenAPI representation into the generated protocol +buffers. Pre-generated versions of these files are in the OpenAPIv2 directory. + + cd $GOPATH/src/github.com/googleapis/gnostic/generate-gnostic + go install + cd .. + generate-gnostic --v2 + +3. [Optional] Generate Protocol Buffer support code. +A pre-generated version of this file is checked into the OpenAPIv2 directory. +This step requires a local installation of protoc, the Protocol Buffer Compiler. +You can get protoc [here](https://github.com/google/protobuf). + + ./COMPILE-PROTOS.sh + +4. [Optional] Rebuild **gnostic**. This is only necessary if you've performed steps +2 or 3 above. + + go install github.com/googleapis/gnostic + +5. Run **gnostic**. This will create a file in the current directory named "petstore.pb" that contains a binary +Protocol Buffer description of a sample API. + + gnostic --pb-out=. examples/petstore.json + +6. You can also compile files that you specify with a URL. Here's another way to compile the previous +example. This time we're creating "petstore.text", which contains a textual representation of the +Protocol Buffer description. This is mainly for use in testing and debugging. + + gnostic --text-out=petstore.text https://raw.githubusercontent.com/googleapis/gnostic/master/examples/petstore.json + +7. For a sample application, see apps/report. + + go install github.com/googleapis/gnostic/apps/report + report petstore.pb + +8. **gnostic** supports plugins. This builds and runs a sample plugin +that reports some basic information about an API. The "-" causes the plugin to +write its output to stdout. + + go install github.com/googleapis/gnostic/plugins/gnostic-go-sample + gnostic examples/petstore.json --go-sample-out=- + +## Copyright + +Copyright 2017, Google Inc. + +## License + +Released under the Apache 2.0 license. diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md new file mode 100644 index 00000000..848b16c6 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -0,0 +1,3 @@ +# Compiler support code + +This directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go new file mode 100644 index 00000000..a64c1b75 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -0,0 +1,43 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Context contains state of the compiler as it traverses a document. 
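Steps 5 through 7 above produce and consume `petstore.pb`. The consuming side looks roughly like the sketch below, assuming the generated Go package is importable as `github.com/googleapis/gnostic/OpenAPIv2`; this is what a reporting tool such as `apps/report` does at its core, not a verbatim excerpt of it.

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
)

func main() {
	// Read the binary OpenAPI description produced by `gnostic --pb-out=.`.
	data, err := ioutil.ReadFile("petstore.pb")
	if err != nil {
		log.Fatal(err)
	}
	document := &openapi_v2.Document{}
	// Unmarshal the bytes into the generated Document message.
	if err := proto.Unmarshal(data, document); err != nil {
		log.Fatal(err)
	}
	// The generated structs give typed access to the description.
	if document.Info != nil {
		fmt.Printf("%s (version %s)\n", document.Info.Title, document.Info.Version)
	}
}
```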
+type Context struct { + Parent *Context + Name string + ExtensionHandlers *[]ExtensionHandler +} + +// NewContextWithExtensions returns a new object representing the compiler state +func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} +} + +// NewContext returns a new object representing the compiler state +func NewContext(name string, parent *Context) *Context { + if parent != nil { + return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + } + return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} +} + +// Description returns a text description of the compiler state +func (context *Context) Description() string { + if context.Parent != nil { + return context.Parent.Description() + "." + context.Name + } + return context.Name +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go new file mode 100644 index 00000000..d8672c10 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -0,0 +1,61 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Error represents compiler errors and their location in the document. +type Error struct { + Context *Context + Message string +} + +// NewError creates an Error. +func NewError(context *Context, message string) *Error { + return &Error{Context: context, Message: message} +} + +// Error returns the string value of an Error. +func (err *Error) Error() string { + if err.Context == nil { + return "ERROR " + err.Message + } + return "ERROR " + err.Context.Description() + " " + err.Message +} + +// ErrorGroup is a container for groups of Error values. +type ErrorGroup struct { + Errors []error +} + +// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. +func NewErrorGroupOrNil(errors []error) error { + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } else { + return &ErrorGroup{Errors: errors} + } +} + +func (group *ErrorGroup) Error() string { + result := "" + for i, err := range group.Errors { + if i > 0 { + result += "\n" + } + result += err.Error() + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go new file mode 100644 index 00000000..1f85b650 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go @@ -0,0 +1,101 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
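`Context` and `Error` above work together: contexts nest through `Parent`, and `Description()` joins the chain into a dotted path that prefixes error messages. A small usage sketch, assuming the package is imported as `compiler`:

```
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Contexts nest via Parent; Description() joins names with dots,
	// so an Error reports where in the document it occurred.
	root := compiler.NewContext("document", nil)
	paths := compiler.NewContext("paths", root)
	err := compiler.NewError(paths, "unexpected key")
	fmt.Println(err) // ERROR document.paths unexpected key
}
```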
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + + "strings" + + "errors" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + ext_plugin "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v2" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// HandleExtension calls a binary extension handler. +func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { + handled := false + var errFromPlugin error + var outFromPlugin *any.Any + + if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { + for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { + outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) + if outFromPlugin == nil { + continue + } else { + handled = true + break + } + } + } + return handled, outFromPlugin, errFromPlugin +} + +func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + binary, _ := yaml.Marshal(in) + + request := &ext_plugin.ExtensionHandlerRequest{} + + version := &ext_plugin.Version{} + version.Major = 0 + version.Minor = 1 + version.Patch = 0 + request.CompilerVersion = version + + request.Wrapper = &ext_plugin.Wrapper{} + + request.Wrapper.Version = "v2" + request.Wrapper.Yaml = string(binary) + request.Wrapper.ExtensionName = extensionName + + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + + if err != nil { + fmt.Printf("Error: %+v\n", err) + return nil, err + } + response := &ext_plugin.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil { + fmt.Printf("Error: %+v\n", err) + fmt.Printf("%s\n", string(output)) + return nil, err + } + if !response.Handled { + return nil, nil + } + if len(response.Error) != 0 { + message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) + return nil, errors.New(message) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go new file mode 100644 index 00000000..76df635f --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -0,0 +1,197 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
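`HandleExtension` walks the configured handlers and stops at the first one that returns a value; each handler is an external binary that receives a serialized `ExtensionHandlerRequest` on stdin and writes an `ExtensionHandlerResponse` to stdout. A sketch of wiring a handler into a context follows; the binary name and the extension value are hypothetical.

```
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Register one handler binary (hypothetical name).
	handlers := []compiler.ExtensionHandler{{Name: "gnostic-extension-sample"}}
	context := compiler.NewContextWithExtensions("document", nil, &handlers)

	// Ask the handlers to process a vendor extension value.
	handled, value, err := compiler.HandleExtension(context, "sample: true", "x-sample")
	fmt.Println(handled, value, err)
}
```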
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "gopkg.in/yaml.v2" + "regexp" + "sort" + "strconv" +) + +// compiler helper functions, usually called from generated code + +// UnpackMap gets a yaml.MapSlice if possible. +func UnpackMap(in interface{}) (yaml.MapSlice, bool) { + m, ok := in.(yaml.MapSlice) + if ok { + return m, true + } + // do we have an empty array? + a, ok := in.([]interface{}) + if ok && len(a) == 0 { + // if so, return an empty map + return yaml.MapSlice{}, true + } + return nil, false +} + +// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. +func SortedKeysForMap(m yaml.MapSlice) []string { + keys := make([]string, 0) + for _, item := range m { + keys = append(keys, item.Key.(string)) + } + sort.Strings(keys) + return keys +} + +// MapHasKey returns true if a yaml.MapSlice contains a specified key. +func MapHasKey(m yaml.MapSlice, key string) bool { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return true + } + } + return false +} + +// MapValueForKey gets the value of a map value for a specified key. +func MapValueForKey(m yaml.MapSlice, key string) interface{} { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return item.Value + } + } + return nil +} + +// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. +func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { + stringArray := make([]string, 0) + for _, item := range interfaceArray { + v, ok := item.(string) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// MissingKeysInMap identifies which keys from a list of required keys are not in a map. +func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { + missingKeys := make([]string, 0) + for _, k := range requiredKeys { + if !MapHasKey(m, k) { + missingKeys = append(missingKeys, k) + } + } + return missingKeys +} + +// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. +func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { + invalidKeys := make([]string, 0) + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok { + key := itemKey + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { + found = true + break + } + } + if !found { + invalidKeys = append(invalidKeys, key) + } + } + } + } + return invalidKeys +} + +// DescribeMap describes a map (for debugging purposes). 
+func DescribeMap(in interface{}, indent string) string { + description := "" + m, ok := in.(map[string]interface{}) + if ok { + keys := make([]string, 0) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := m[k] + description += fmt.Sprintf("%s%s:\n", indent, k) + description += DescribeMap(v, indent+" ") + } + return description + } + a, ok := in.([]interface{}) + if ok { + for i, v := range a { + description += fmt.Sprintf("%s%d:\n", indent, i) + description += DescribeMap(v, indent+" ") + } + return description + } + description += fmt.Sprintf("%s%+v\n", indent, in) + return description +} + +// PluralProperties returns the string "properties" pluralized. +func PluralProperties(count int) string { + if count == 1 { + return "property" + } + return "properties" +} + +// StringArrayContainsValue returns true if a string array contains a specified value. +func StringArrayContainsValue(array []string, value string) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +// StringArrayContainsValues returns true if a string array contains all of a list of specified values. +func StringArrayContainsValues(array []string, values []string) bool { + for _, value := range values { + if !StringArrayContainsValue(array, value) { + return false + } + } + return true +} + +// StringValue returns the string value of an item. +func StringValue(item interface{}) (value string, ok bool) { + value, ok = item.(string) + if ok { + return value, ok + } + intValue, ok := item.(int) + if ok { + return strconv.Itoa(intValue), true + } + return "", false +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go new file mode 100644 index 00000000..9713a21c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/main.go @@ -0,0 +1,16 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compiler provides support functions to generated compiler code. +package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go new file mode 100644 index 00000000..604a46a6 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -0,0 +1,167 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
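The helpers above operate on `yaml.MapSlice`, the order-preserving map type produced by `gopkg.in/yaml.v2`. A short sketch of the common lookups:

```
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Parse a small YAML fragment into the ordered map type the helpers expect.
	var node yaml.MapSlice
	if err := yaml.Unmarshal([]byte("name: body\nin: body"), &node); err != nil {
		panic(err)
	}
	m, ok := compiler.UnpackMap(node)
	fmt.Println(ok)                                 // true
	fmt.Println(compiler.MapHasKey(m, "in"))        // true
	fmt.Println(compiler.MapValueForKey(m, "name")) // body
	// Which required keys are absent?
	fmt.Println(compiler.MissingKeysInMap(m, []string{"name", "required"})) // [required]
}
```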
+ +package compiler + +import ( + "fmt" + "gopkg.in/yaml.v2" + "io/ioutil" + "log" + "net/http" + "net/url" + "path/filepath" + "strings" +) + +var fileCache map[string][]byte +var infoCache map[string]interface{} +var count int64 + +var verboseReader = false + +func initializeFileCache() { + if fileCache == nil { + fileCache = make(map[string][]byte, 0) + } +} + +func initializeInfoCache() { + if infoCache == nil { + infoCache = make(map[string]interface{}, 0) + } +} + +// FetchFile gets a specified file from the local filesystem or a remote location. +func FetchFile(fileurl string) ([]byte, error) { + initializeFileCache() + bytes, ok := fileCache[fileurl] + if ok { + if verboseReader { + log.Printf("Cache hit %s", fileurl) + } + return bytes, nil + } + log.Printf("Fetching %s", fileurl) + response, err := http.Get(fileurl) + if err != nil { + return nil, err + } + defer response.Body.Close() + bytes, err = ioutil.ReadAll(response.Body) + if err == nil { + fileCache[fileurl] = bytes + } + return bytes, err +} + +// ReadBytesForFile reads the bytes of a file. +func ReadBytesForFile(filename string) ([]byte, error) { + // is the filename a url? + fileurl, _ := url.Parse(filename) + if fileurl.Scheme != "" { + // yes, fetch it + bytes, err := FetchFile(filename) + if err != nil { + return nil, err + } + return bytes, nil + } + // no, it's a local filename + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. +func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { + initializeInfoCache() + cachedInfo, ok := infoCache[filename] + if ok { + if verboseReader { + log.Printf("Cache hit info for file %s", filename) + } + return cachedInfo, nil + } + if verboseReader { + log.Printf("Reading info for file %s", filename) + } + var info yaml.MapSlice + err := yaml.Unmarshal(bytes, &info) + if err != nil { + return nil, err + } + infoCache[filename] = info + return info, nil +} + +// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. 
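+// For example (hypothetical paths, for illustration only), calling
+//
+//    ReadInfoForRef("spec/openapi.yaml", "definitions.yaml#/definitions/Pet")
+//
+// loads "spec/definitions.yaml" and then walks the ["definitions", "Pet"]
+// path through the parsed yaml.MapSlice.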
+func ReadInfoForRef(basefile string, ref string) (interface{}, error) { + initializeInfoCache() + { + info, ok := infoCache[ref] + if ok { + if verboseReader { + log.Printf("Cache hit for ref %s#%s", basefile, ref) + } + return info, nil + } + } + if verboseReader { + log.Printf("Reading info for ref %s#%s", basefile, ref) + } + count = count + 1 + basedir, _ := filepath.Split(basefile) + parts := strings.Split(ref, "#") + var filename string + if parts[0] != "" { + filename = basedir + parts[0] + } else { + filename = basefile + } + bytes, err := ReadBytesForFile(filename) + if err != nil { + return nil, err + } + info, err := ReadInfoFromBytes(filename, bytes) + if err != nil { + log.Printf("File error: %v\n", err) + } else { + if len(parts) > 1 { + path := strings.Split(parts[1], "/") + for i, key := range path { + if i > 0 { + m, ok := info.(yaml.MapSlice) + if ok { + found := false + for _, section := range m { + if section.Key == key { + info = section.Value + found = true + } + } + if !found { + infoCache[ref] = nil + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + } + } + } + } + } + infoCache[ref] = info + return info, nil +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md new file mode 100644 index 00000000..ff1c2eb1 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/README.md @@ -0,0 +1,5 @@ +# Extensions + +This directory contains support code for building Gnostic extensions and associated examples. + +Extensions are used to compile vendor or specification extensions into protocol buffer structures. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go new file mode 100644 index 00000000..b14f1f94 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -0,0 +1,219 @@ +// Code generated by protoc-gen-go. +// source: extension.proto +// DO NOT EDIT! + +/* +Package openapiextension_v1 is a generated protocol buffer package. + +It is generated from these files: + extension.proto + +It has these top-level messages: + Version + ExtensionHandlerRequest + ExtensionHandlerResponse + Wrapper +*/ +package openapiextension_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of OpenAPI compiler. +type Version struct { + Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. 
+ Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Version) GetMajor() int32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil { + return m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil { + return m.Suffix + } + return "" +} + +// An encoded Request is written to the ExtensionHandler's stdin. +type ExtensionHandlerRequest struct { + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to openapic. + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` + // The version number of openapi compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } +func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerRequest) ProtoMessage() {} +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if m != nil { + return m.Wrapper + } + return nil +} + +func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +type ExtensionHandlerResponse struct { + // true if the extension is handled by the extension handler; false otherwise + Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` + // text output + Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` +} + +func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } +func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerResponse) ProtoMessage() {} +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ExtensionHandlerResponse) GetHandled() bool { + if m != nil { + return m.Handled + } + return false +} + +func (m *ExtensionHandlerResponse) GetError() []string { + if m != nil { + return m.Error + } + return nil +} + +func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +type Wrapper struct { + // version of the OpenAPI specification in which this extension was written. 
+ Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // Name of the extension + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` + // Must be a valid yaml for the proto + Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Wrapper) Reset() { *m = Wrapper{} } +func (m *Wrapper) String() string { return proto.CompactTextString(m) } +func (*Wrapper) ProtoMessage() {} +func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Wrapper) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Wrapper) GetExtensionName() string { + if m != nil { + return m.ExtensionName + } + return "" +} + +func (m *Wrapper) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") + proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") + proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") + proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") +} + +func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 355 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, + 0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, + 0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, + 0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, + 0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4, + 0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4, + 0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0, + 0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34, + 0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a, + 0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a, + 0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66, + 0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2, + 0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d, + 0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3, + 0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c, + 0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa, + 0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab, + 0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3, + 0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62, + 0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23, + 0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1, + 0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52, + 
0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto new file mode 100644 index 00000000..806760a1 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -0,0 +1,93 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +import "google/protobuf/any.proto"; +package openapiextension.v1; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIExtensionV1"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapic.v1"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +// +option objc_class_prefix = "OAE"; // "OpenAPI Extension" + +// The version number of OpenAPI compiler. +message Version { + int32 major = 1; + int32 minor = 2; + int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + string suffix = 4; +} + +// An encoded Request is written to the ExtensionHandler's stdin. +message ExtensionHandlerRequest { + + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to openapic. + Wrapper wrapper = 1; + + // The version number of openapi compiler. + Version compiler_version = 3; +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +message ExtensionHandlerResponse { + + // true if the extension is handled by the extension handler; false otherwise + bool handled = 1; + + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. 
+ repeated string error = 2; + + // text output + google.protobuf.Any value = 3; +} + +message Wrapper { + // version of the OpenAPI specification in which this extension was written. + string version = 1; + + // Name of the extension + string extension_name = 2; + + // Must be a valid yaml for the proto + string yaml = 3; +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go new file mode 100644 index 00000000..94a8e62a --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go @@ -0,0 +1,82 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapiextension_v1 + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" +) + +type documentHandler func(version string, extensionName string, document string) +type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) + +func forInputYamlFromOpenapic(handler documentHandler) { + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + fmt.Println("File error:", err.Error()) + os.Exit(1) + } + if len(data) == 0 { + fmt.Println("No input data.") + os.Exit(1) + } + request := &ExtensionHandlerRequest{} + err = proto.Unmarshal(data, request) + if err != nil { + fmt.Println("Input error:", err.Error()) + os.Exit(1) + } + handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) +} + +// ProcessExtension calles the handler for a specified extension. 
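+// A typical extension binary wires it up like this (a hedged sketch:
+// "x-sample" and parseSample are invented for illustration; the handler
+// must match the extensionHandler signature above):
+//
+//    func main() {
+//        openapiextension_v1.ProcessExtension(
+//            func(name string, yamlInput string) (bool, proto.Message, error) {
+//                if name != "x-sample" {
+//                    return false, nil, nil // not ours; report unhandled
+//                }
+//                msg, err := parseSample(yamlInput) // hypothetical parser
+//                return true, msg, err
+//            })
+//    }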
+func ProcessExtension(handleExtension extensionHandler) { + response := &ExtensionHandlerResponse{} + forInputYamlFromOpenapic( + func(version string, extensionName string, yamlInput string) { + var newObject proto.Message + var err error + + handled, newObject, err := handleExtension(extensionName, yamlInput) + if !handled { + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + + // If we reach here, then the extension is handled + response.Handled = true + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + response.Value, err = ptypes.MarshalAny(newObject) + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + }) + + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) +} diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt new file mode 100644 index 00000000..81316beb --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md new file mode 100644 index 00000000..61bd830e --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/README.md @@ -0,0 +1,24 @@ +httpcache +========= + +[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) + +Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses. + +It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). + +Cache Backends +-------------- + +- The built-in 'memory' cache stores responses in an in-memory map. +- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. 
+- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. +- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. +- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). +- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. +- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. + +License +------- + +- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go new file mode 100644 index 00000000..42e3129d --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go @@ -0,0 +1,61 @@ +// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package +// to supplement an in-memory map with persistent storage +// +package diskcache + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "github.com/peterbourgon/diskv" + "io" +) + +// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage +type Cache struct { + d *diskv.Diskv +} + +// Get returns the response corresponding to key if present +func (c *Cache) Get(key string) (resp []byte, ok bool) { + key = keyToFilename(key) + resp, err := c.d.Read(key) + if err != nil { + return []byte{}, false + } + return resp, true +} + +// Set saves a response to the cache as key +func (c *Cache) Set(key string, resp []byte) { + key = keyToFilename(key) + c.d.WriteStream(key, bytes.NewReader(resp), true) +} + +// Delete removes the response with key from the cache +func (c *Cache) Delete(key string) { + key = keyToFilename(key) + c.d.Erase(key) +} + +func keyToFilename(key string) string { + h := md5.New() + io.WriteString(h, key) + return hex.EncodeToString(h.Sum(nil)) +} + +// New returns a new Cache that will store files in basePath +func New(basePath string) *Cache { + return &Cache{ + d: diskv.New(diskv.Options{ + BasePath: basePath, + CacheSizeMax: 100 * 1024 * 1024, // 100MB + }), + } +} + +// NewWithDiskv returns a new Cache using the provided Diskv as underlying +// storage. +func NewWithDiskv(d *diskv.Diskv) *Cache { + return &Cache{d} +} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go new file mode 100644 index 00000000..8239edc2 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -0,0 +1,553 @@ +// Package httpcache provides a http.RoundTripper implementation that works as a +// mostly RFC-compliant cache for http responses. +// +// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client +// and not for a shared proxy). 
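+//
+// A minimal usage sketch (the URL is a placeholder):
+//
+//    client := httpcache.NewMemoryCacheTransport().Client()
+//    resp, _ := client.Get("https://example.com/") // first request hits the network
+//    resp.Body.Close()
+//    resp, _ = client.Get("https://example.com/") // may now be served from cache
+//    // On a cache hit, resp.Header.Get(httpcache.XFromCache) == "1".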
+// +package httpcache + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "strings" + "sync" + "time" +) + +const ( + stale = iota + fresh + transparent + // XFromCache is the header added to responses that are returned from the cache + XFromCache = "X-From-Cache" +) + +// A Cache interface is used by the Transport to store and retrieve responses. +type Cache interface { + // Get returns the []byte representation of a cached response and a bool + // set to true if the value isn't empty + Get(key string) (responseBytes []byte, ok bool) + // Set stores the []byte representation of a response against a key + Set(key string, responseBytes []byte) + // Delete removes the value associated with the key + Delete(key string) +} + +// cacheKey returns the cache key for req. +func cacheKey(req *http.Request) string { + return req.URL.String() +} + +// CachedResponse returns the cached http.Response for req if present, and nil +// otherwise. +func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { + cachedVal, ok := c.Get(cacheKey(req)) + if !ok { + return + } + + b := bytes.NewBuffer(cachedVal) + return http.ReadResponse(bufio.NewReader(b), req) +} + +// MemoryCache is an implemtation of Cache that stores responses in an in-memory map. +type MemoryCache struct { + mu sync.RWMutex + items map[string][]byte +} + +// Get returns the []byte representation of the response and true if present, false if not +func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { + c.mu.RLock() + resp, ok = c.items[key] + c.mu.RUnlock() + return resp, ok +} + +// Set saves response resp to the cache with key +func (c *MemoryCache) Set(key string, resp []byte) { + c.mu.Lock() + c.items[key] = resp + c.mu.Unlock() +} + +// Delete removes key from the cache +func (c *MemoryCache) Delete(key string) { + c.mu.Lock() + delete(c.items, key) + c.mu.Unlock() +} + +// NewMemoryCache returns a new Cache that will store items in an in-memory map +func NewMemoryCache() *MemoryCache { + c := &MemoryCache{items: map[string][]byte{}} + return c +} + +// Transport is an implementation of http.RoundTripper that will return values from a cache +// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) +// to repeated requests allowing servers to return 304 / Not Modified +type Transport struct { + // The RoundTripper interface actually used to make requests + // If nil, http.DefaultTransport is used + Transport http.RoundTripper + Cache Cache + // If true, responses returned from the cache will be given an extra header, X-From-Cache + MarkCachedResponses bool +} + +// NewTransport returns a new Transport with the +// provided Cache implementation and MarkCachedResponses set to true +func NewTransport(c Cache) *Transport { + return &Transport{Cache: c, MarkCachedResponses: true} +} + +// Client returns an *http.Client that caches responses. 
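+// Any Cache backend works here; for a persistent cache, for example,
+// httpcache.NewTransport(diskcache.New("/tmp/cache")).Client() wires in
+// the diskv-backed implementation (the path shown is illustrative).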
+func (t *Transport) Client() *http.Client { + return &http.Client{Transport: t} +} + +// varyMatches will return false unless all of the cached values for the headers listed in Vary +// match the new request +func varyMatches(cachedResp *http.Response, req *http.Request) bool { + for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { + header = http.CanonicalHeaderKey(header) + if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { + return false + } + } + return true +} + +// RoundTrip takes a Request and returns a Response +// +// If there is a fresh Response already in cache, then it will be returned without connecting to +// the server. +// +// If there is a stale Response, then any validators it contains will be set on the new request +// to give the server a chance to respond with NotModified. If this happens, then the cached Response +// will be returned. +func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + cacheKey := cacheKey(req) + cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" + var cachedResp *http.Response + if cacheable { + cachedResp, err = CachedResponse(t.Cache, req) + } else { + // Need to invalidate an existing value + t.Cache.Delete(cacheKey) + } + + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + + if cacheable && cachedResp != nil && err == nil { + if t.MarkCachedResponses { + cachedResp.Header.Set(XFromCache, "1") + } + + if varyMatches(cachedResp, req) { + // Can only use cached value if the new request doesn't Vary significantly + freshness := getFreshness(cachedResp.Header, req.Header) + if freshness == fresh { + return cachedResp, nil + } + + if freshness == stale { + var req2 *http.Request + // Add validators if caller hasn't already done so + etag := cachedResp.Header.Get("etag") + if etag != "" && req.Header.Get("etag") == "" { + req2 = cloneRequest(req) + req2.Header.Set("if-none-match", etag) + } + lastModified := cachedResp.Header.Get("last-modified") + if lastModified != "" && req.Header.Get("last-modified") == "" { + if req2 == nil { + req2 = cloneRequest(req) + } + req2.Header.Set("if-modified-since", lastModified) + } + if req2 != nil { + req = req2 + } + } + } + + resp, err = transport.RoundTrip(req) + if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { + // Replace the 304 response with the one from cache, but update with some new headers + endToEndHeaders := getEndToEndHeaders(resp.Header) + for _, header := range endToEndHeaders { + cachedResp.Header[header] = resp.Header[header] + } + cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) + cachedResp.StatusCode = http.StatusOK + + resp = cachedResp + } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && + req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { + // In case of transport failure and stale-if-error activated, returns cached content + // when available + cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) + cachedResp.StatusCode = http.StatusOK + return cachedResp, nil + } else { + if err != nil || resp.StatusCode != http.StatusOK { + t.Cache.Delete(cacheKey) + } + if err != nil { + return nil, err + } + } + } else { + reqCacheControl := parseCacheControl(req.Header) + if _, ok := reqCacheControl["only-if-cached"]; ok { + resp = newGatewayTimeoutResponse(req) + } else { + 
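+			// No usable cached response (or the request was not
+			// cacheable), so perform a real round trip.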
resp, err = transport.RoundTrip(req) + if err != nil { + return nil, err + } + } + } + + if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { + for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { + varyKey = http.CanonicalHeaderKey(varyKey) + fakeHeader := "X-Varied-" + varyKey + reqValue := req.Header.Get(varyKey) + if reqValue != "" { + resp.Header.Set(fakeHeader, reqValue) + } + } + switch req.Method { + case "GET": + // Delay caching until EOF is reached. + resp.Body = &cachingReadCloser{ + R: resp.Body, + OnEOF: func(r io.Reader) { + resp := *resp + resp.Body = ioutil.NopCloser(r) + respBytes, err := httputil.DumpResponse(&resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + }, + } + default: + respBytes, err := httputil.DumpResponse(resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + } + } else { + t.Cache.Delete(cacheKey) + } + return resp, nil +} + +// ErrNoDateHeader indicates that the HTTP headers contained no Date header. +var ErrNoDateHeader = errors.New("no Date header") + +// Date parses and returns the value of the Date header. +func Date(respHeaders http.Header) (date time.Time, err error) { + dateHeader := respHeaders.Get("date") + if dateHeader == "" { + err = ErrNoDateHeader + return + } + + return time.Parse(time.RFC1123, dateHeader) +} + +type realClock struct{} + +func (c *realClock) since(d time.Time) time.Duration { + return time.Since(d) +} + +type timer interface { + since(d time.Time) time.Duration +} + +var clock timer = &realClock{} + +// getFreshness will return one of fresh/stale/transparent based on the cache-control +// values of the request and the response +// +// fresh indicates the response can be returned +// stale indicates that the response needs validating before it is returned +// transparent indicates the response should not be used to fulfil the request +// +// Because this is only a private cache, 'public' and 'private' in cache-control aren't +// signficant. Similarly, smax-age isn't used. +func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + if _, ok := reqCacheControl["no-cache"]; ok { + return transparent + } + if _, ok := respCacheControl["no-cache"]; ok { + return stale + } + if _, ok := reqCacheControl["only-if-cached"]; ok { + return fresh + } + + date, err := Date(respHeaders) + if err != nil { + return stale + } + currentAge := clock.since(date) + + var lifetime time.Duration + var zeroDuration time.Duration + + // If a response includes both an Expires header and a max-age directive, + // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. 
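+	// (Per RFC 2616 section 14.9.3 and RFC 7234 section 5.2.2.8.)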
+ if maxAge, ok := respCacheControl["max-age"]; ok { + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } else { + expiresHeader := respHeaders.Get("Expires") + if expiresHeader != "" { + expires, err := time.Parse(time.RFC1123, expiresHeader) + if err != nil { + lifetime = zeroDuration + } else { + lifetime = expires.Sub(date) + } + } + } + + if maxAge, ok := reqCacheControl["max-age"]; ok { + // the client is willing to accept a response whose age is no greater than the specified time in seconds + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } + if minfresh, ok := reqCacheControl["min-fresh"]; ok { + // the client wants a response that will still be fresh for at least the specified number of seconds. + minfreshDuration, err := time.ParseDuration(minfresh + "s") + if err == nil { + currentAge = time.Duration(currentAge + minfreshDuration) + } + } + + if maxstale, ok := reqCacheControl["max-stale"]; ok { + // Indicates that the client is willing to accept a response that has exceeded its expiration time. + // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded + // its expiration time by no more than the specified number of seconds. + // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. + // + // Responses served only because of a max-stale value are supposed to have a Warning header added to them, + // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different + // return-value available here. + if maxstale == "" { + return fresh + } + maxstaleDuration, err := time.ParseDuration(maxstale + "s") + if err == nil { + currentAge = time.Duration(currentAge - maxstaleDuration) + } + } + + if lifetime > currentAge { + return fresh + } + + return stale +} + +// Returns true if either the request or the response includes the stale-if-error +// cache control extension: https://tools.ietf.org/html/rfc5861 +func canStaleOnError(respHeaders, reqHeaders http.Header) bool { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + + var err error + lifetime := time.Duration(-1) + + if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + + if lifetime >= 0 { + date, err := Date(respHeaders) + if err != nil { + return false + } + currentAge := clock.since(date) + if lifetime > currentAge { + return true + } + } + + return false +} + +func getEndToEndHeaders(respHeaders http.Header) []string { + // These headers are always hop-by-hop + hopByHopHeaders := map[string]struct{}{ + "Connection": struct{}{}, + "Keep-Alive": struct{}{}, + "Proxy-Authenticate": struct{}{}, + "Proxy-Authorization": struct{}{}, + "Te": struct{}{}, + "Trailers": struct{}{}, + "Transfer-Encoding": struct{}{}, + "Upgrade": struct{}{}, + } + + for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { + // any header listed in connection, if present, is also considered hop-by-hop + if strings.Trim(extra, " ") != "" { + hopByHopHeaders[http.CanonicalHeaderKey(extra)] 
= struct{}{} + } + } + endToEndHeaders := []string{} + for respHeader, _ := range respHeaders { + if _, ok := hopByHopHeaders[respHeader]; !ok { + endToEndHeaders = append(endToEndHeaders, respHeader) + } + } + return endToEndHeaders +} + +func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { + if _, ok := respCacheControl["no-store"]; ok { + return false + } + if _, ok := reqCacheControl["no-store"]; ok { + return false + } + return true +} + +func newGatewayTimeoutResponse(req *http.Request) *http.Response { + var braw bytes.Buffer + braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") + resp, err := http.ReadResponse(bufio.NewReader(&braw), req) + if err != nil { + panic(err) + } + return resp +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// headerAllCommaSepValues returns all comma-separated values (each +// with whitespace trimmed) for header name in headers. According to +// Section 4.2 of the HTTP/1.1 spec +// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), +// values from multiple occurrences of a header should be concatenated, if +// the header's value is a comma-separated list. +func headerAllCommaSepValues(headers http.Header, name string) []string { + var vals []string + for _, val := range headers[http.CanonicalHeaderKey(name)] { + fields := strings.Split(val, ",") + for i, f := range fields { + fields[i] = strings.TrimSpace(f) + } + vals = append(vals, fields...) + } + return vals +} + +// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF +// handler with a full copy of the content read from R when EOF is +// reached. +type cachingReadCloser struct { + // Underlying ReadCloser. + R io.ReadCloser + // OnEOF is called with a copy of the content of R when EOF is reached. + OnEOF func(io.Reader) + + buf bytes.Buffer // buf stores a copy of the content of R. +} + +// Read reads the next len(p) bytes from R or until R is drained. The +// return value n is the number of bytes read. If R has no data to +// return, err is io.EOF and OnEOF is called with a full copy of what +// has been read so far. 
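+//
+// This is what lets RoundTrip cache a GET response lazily: the body is
+// copied into buf as the caller consumes it, and it is only written to
+// the cache once it has been read through to EOF.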
+func (r *cachingReadCloser) Read(p []byte) (n int, err error) { + n, err = r.R.Read(p) + r.buf.Write(p[:n]) + if err == io.EOF { + r.OnEOF(bytes.NewReader(r.buf.Bytes())) + } + return n, err +} + +func (r *cachingReadCloser) Close() error { + return r.R.Close() +} + +// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation +func NewMemoryCacheTransport() *Transport { + c := NewMemoryCache() + t := NewTransport(c) + return t +} diff --git a/vendor/github.com/ulule/deepcopier/LICENSE b/vendor/github.com/json-iterator/go/LICENSE similarity index 95% rename from vendor/github.com/ulule/deepcopier/LICENSE rename to vendor/github.com/json-iterator/go/LICENSE index d5c4ea02..2cf4f5ab 100644 --- a/vendor/github.com/ulule/deepcopier/LICENSE +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2015 Ulule +Copyright (c) 2016 json-iterator Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md new file mode 100644 index 00000000..eca7ab97 --- /dev/null +++ b/vendor/github.com/json-iterator/go/README.md @@ -0,0 +1,80 @@ +[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go) +[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) +[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) +[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) + +A high-performance 100% compatible drop-in replacement of "encoding/json" + +``` +Go开发者们请加入我们,滴滴出行平台技术部 taowen@didichuxing.com +``` + +# Benchmark + +![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) + +Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go + +Raw Result (easyjson requires static code generation) + +| | ns/op | allocation bytes | allocation times | +| --- | --- | --- | --- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | + +# Usage + +100% compatibility with standard lib + +Replace + +```go +import "encoding/json" +json.Marshal(&data) +``` + +with + +```go +import "github.com/json-iterator/go" +jsoniter.Marshal(&data) +``` + +Replace + +```go 
+import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import "github.com/json-iterator/go" +jsoniter.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contribution Welcomed ! + +Contributors + +* [thockin](https://github.com/thockin) +* [mattn](https://github.com/mattn) +* [cch123](https://github.com/cch123) + +Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/feature_adapter.go b/vendor/github.com/json-iterator/go/feature_adapter.go new file mode 100644 index 00000000..edb477c4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_adapter.go @@ -0,0 +1,127 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +func lastNotSpacePos(data []byte) int { + for i := len(data) - 1; i >= 0; i-- { + if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' { + return i + 1 + } + } + return 0 +} + +// UnmarshalFromString convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? 
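+// (It reports whether unread bytes remain between head and tail in the
+// Iterator's buffer; it does not probe the underlying reader.)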
+func (adapter *Decoder) More() bool { + return adapter.iter.head != adapter.iter.tail +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber for number JSON element, use float64 or json.NumberValue (alias of string) +func (adapter *Decoder) UseNumber() { + origCfg := adapter.iter.cfg.configBeforeFrozen + origCfg.UseNumber = true + adapter.iter.cfg = origCfg.Froze().(*frozenConfig) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + adapter.stream.cfg.indentionStep = len(indent) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.Froze().(*frozenConfig) +} diff --git a/vendor/github.com/json-iterator/go/feature_any.go b/vendor/github.com/json-iterator/go/feature_any.go new file mode 100644 index 00000000..6733dce4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any.go @@ -0,0 +1,242 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. 
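+//
+// A small usage sketch (the JSON document is illustrative):
+//
+//    any := jsoniter.Get([]byte(`{"users":[{"name":"ada"}]}`), "users", 0, "name")
+//    fmt.Println(any.ToString()) // prints: ada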
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + // TODO: add Set + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + return WrapUint64(uint64(val.(uint))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
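+// Objects, arrays and numbers are captured as raw bytes and only parsed
+// when a conversion or Get is invoked on the returned Any.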
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_array.go b/vendor/github.com/json-iterator/go/feature_any_array.go new file mode 100644 index 00000000..0449e9aa --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) 
+ if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
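+			// same wildcard rule as the lazy variant: indices whose sub-path fails are skipped, not reported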
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_bool.go b/vendor/github.com/json-iterator/go/feature_any_bool.go new file mode 100644 index 00000000..9452324a --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/feature_any_float.go b/vendor/github.com/json-iterator/go/feature_any_float.go new file mode 100644 index 00000000..35fdb094 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any 
*floatAny) ToInt64() int64 { + return int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_int32.go b/vendor/github.com/json-iterator/go/feature_any_int32.go new file mode 100644 index 00000000..1b56f399 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_int64.go b/vendor/github.com/json-iterator/go/feature_any_int64.go new file mode 100644 index 00000000..c440d72b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) 
{ + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_invalid.go b/vendor/github.com/json-iterator/go/feature_any_invalid.go new file mode 100644 index 00000000..1d859eac --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/feature_any_nil.go b/vendor/github.com/json-iterator/go/feature_any_nil.go new file mode 100644 index 00000000..d04cb54c --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/feature_any_number.go b/vendor/github.com/json-iterator/go/feature_any_number.go new file mode 100644 index 00000000..4e1c2764 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_number.go @@ -0,0 +1,104 @@ +package jsoniter + +import "unsafe" + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) 
ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_object.go b/vendor/github.com/json-iterator/go/feature_any_object.go new file mode 100644 index 00000000..c44ef5c9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + 
iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) 
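+					// only exported fields get here (guarded by CanInterface above); unresolved sub-paths are dropped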
+ if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
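+				// map keys are rendered via reflect.Value.String(), so string-keyed maps are the assumed case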
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_string.go b/vendor/github.com/json-iterator/go/feature_any_string.go new file mode 100644 index 00000000..abf060bd --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_string.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + endPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + endPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint32.go b/vendor/github.com/json-iterator/go/feature_any_uint32.go new file mode 100644 index 00000000..656bbd33 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint64.go b/vendor/github.com/json-iterator/go/feature_any_uint64.go new file mode 100644 index 00000000..7df2fce3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} 
{ + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_config.go b/vendor/github.com/json-iterator/go/feature_config.go new file mode 100644 index 00000000..fc055d50 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_config.go @@ -0,0 +1,312 @@ +package jsoniter + +import ( + "encoding/json" + "errors" + "io" + "reflect" + "sync/atomic" + "unsafe" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + TagKey string +} + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + decoderCache unsafe.Pointer + encoderCache unsafe.Pointer + extensions []Extension + streamPool chan *Stream + iteratorPool chan *Iterator +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, +}.Froze() + +// Froze forge API from config +func (cfg Config) Froze() API { + // TODO: cache frozen config + frozenConfig := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + streamPool: make(chan *Stream, 16), + iteratorPool: make(chan *Iterator, 16), + } + atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{})) + atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[string]ValEncoder{})) + if cfg.MarshalFloatWith6Digits { + frozenConfig.marshalFloatWith6Digits() + } + if cfg.EscapeHTML { + frozenConfig.escapeHTML() + } + if cfg.UseNumber { + frozenConfig.useNumber() + } + frozenConfig.configBeforeFrozen = cfg + return frozenConfig +} + +func (cfg *frozenConfig) useNumber() { + cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }}) +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) registerExtension(extension Extension) { + cfg.extensions = append(cfg.extensions, extension) +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { 
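+	// IsEmpty backs the omitempty struct tag: a zero float32 is treated as empty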
+ return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits() { + // for better performance + cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{}) + cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{}) +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML() { + cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{}) +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) { + done := false + for !done { + ptr := atomic.LoadPointer(&cfg.decoderCache) + cache := *(*map[reflect.Type]ValDecoder)(ptr) + copied := map[reflect.Type]ValDecoder{} + for k, v := range cache { + copied[k] = v + } + copied[cacheKey] = decoder + done = atomic.CompareAndSwapPointer(&cfg.decoderCache, ptr, unsafe.Pointer(&copied)) + } +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) { + done := false + for !done { + ptr := atomic.LoadPointer(&cfg.encoderCache) + cache := *(*map[reflect.Type]ValEncoder)(ptr) + copied := map[reflect.Type]ValEncoder{} + for k, v := range cache { + copied[k] = v + } + copied[cacheKey] = encoder + done = atomic.CompareAndSwapPointer(&cfg.encoderCache, ptr, unsafe.Pointer(&copied)) + } +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder { + ptr := atomic.LoadPointer(&cfg.decoderCache) + cache := *(*map[reflect.Type]ValDecoder)(ptr) + return cache[cacheKey] +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder { + ptr := atomic.LoadPointer(&cfg.encoderCache) + cache := *(*map[reflect.Type]ValEncoder)(ptr) + return cache[cacheKey] +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result 
:= stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.Froze().Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + data = data[:lastNotSpacePos(data)] + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + if iter.head == iter.tail { + iter.loadMore() + } + if iter.Error == io.EOF { + return nil + } + if iter.Error == nil { + iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal") + } + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + data = data[:lastNotSpacePos(data)] + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + typ := reflect.TypeOf(v) + if typ.Kind() != reflect.Ptr { + // return non-pointer error + return errors.New("the second param must be ptr type") + } + iter.ReadVal(v) + if iter.head == iter.tail { + iter.loadMore() + } + if iter.Error == io.EOF { + return nil + } + if iter.Error == nil { + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + } + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} diff --git a/vendor/github.com/json-iterator/go/feature_iter.go b/vendor/github.com/json-iterator/go/feature_iter.go new file mode 100644 index 00000000..4357d69b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter.go @@ -0,0 +1,307 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + 
valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. +type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + captureStartedAt int + captured []byte + Error error +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely") + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. +func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + iter.Error = fmt.Errorf("%s: %s, parsing %v ...%s... 
at %s", operation, msg, iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing %v ...|%s|... at %s", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. +func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_array.go b/vendor/github.com/json-iterator/go/feature_iter_array.go new file mode 100644 index 00000000..cbc3ec8d --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_array.go @@ -0,0 +1,58 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found: "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end") + return false + } + return true + } + return true + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found: "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_float.go b/vendor/github.com/json-iterator/go/feature_iter_float.go new file mode 100644 index 00000000..86f45991 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_float.go @@ -0,0 +1,341 @@ +package jsoniter + +import ( + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + 
return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + 
return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_int.go b/vendor/github.com/json-iterator/go/feature_iter_int.go new file mode 100644 index 00000000..886879ef --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_int.go @@ -0,0 +1,258 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter 
*Iterator) ReadInt16() (ret int16) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt16+1 {
+ iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int16(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt16 {
+ iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int16(val)
+}
+
+// ReadUint16 read uint16
+func (iter *Iterator) ReadUint16() (ret uint16) {
+ val := iter.readUint32(iter.nextToken())
+ if val > math.MaxUint16 {
+ iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return uint16(val)
+}
+
+// ReadInt32 read int32
+func (iter *Iterator) ReadInt32() (ret int32) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt32+1 {
+ iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int32(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt32 {
+ iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int32(val)
+}
+
+// ReadUint32 read uint32
+func (iter *Iterator) ReadUint32() (ret uint32) {
+ return iter.readUint32(iter.nextToken())
+}
+
+func (iter *Iterator) readUint32(c byte) (ret uint32) {
+ ind := intDigits[c]
+ if ind == 0 {
+ return 0 // single zero
+ }
+ if ind == invalidCharForNumber {
+ iter.ReportError("readUint32", "unexpected character: "+string([]byte{c}))
+ return
+ }
+ value := uint32(ind)
+ if iter.tail-iter.head > 10 {
+ i := iter.head
+ ind2 := intDigits[iter.buf[i]]
+ if ind2 == invalidCharForNumber {
+ iter.head = i
+ return value
+ }
+ i++
+ ind3 := intDigits[iter.buf[i]]
+ if ind3 == invalidCharForNumber {
+ iter.head = i
+ return value*10 + uint32(ind2)
+ }
+ i++
+ ind4 := intDigits[iter.buf[i]]
+ if ind4 == invalidCharForNumber {
+ iter.head = i
+ return value*100 + uint32(ind2)*10 + uint32(ind3)
+ }
+ i++
+ ind5 := intDigits[iter.buf[i]]
+ if ind5 == invalidCharForNumber {
+ iter.head = i
+ return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
+ }
+ i++
+ ind6 := intDigits[iter.buf[i]]
+ if ind6 == invalidCharForNumber {
+ iter.head = i
+ return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
+ }
+ i++
+ ind7 := intDigits[iter.buf[i]]
+ if ind7 == invalidCharForNumber {
+ iter.head = i
+ return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
+ }
+ i++
+ ind8 := intDigits[iter.buf[i]]
+ if ind8 == invalidCharForNumber {
+ iter.head = i
+ return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
+ }
+ i++
+ ind9 := intDigits[iter.buf[i]]
+ value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
+ iter.head = i
+ if ind9 == invalidCharForNumber {
+ return value
+ }
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ ind = intDigits[iter.buf[i]]
+ if ind == invalidCharForNumber {
+ iter.head = i
+ return value
+ }
+ if value > uint32SafeToMultiply10 {
+ value2 := (value << 3) + (value << 1) + uint32(ind)
+ if value2 < value {
+ iter.ReportError("readUint32", "overflow")
+ return
+ }
+ value = value2
+ continue
+ }
+ value = (value << 3) + (value << 1) + uint32(ind)
+ }
+ if !iter.loadMore() {
+ return value
+ }
+ }
+}
+
+// ReadInt64 read int64
+func (iter *Iterator) ReadInt64() (ret int64) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint64(iter.readByte())
+ if val > math.MaxInt64+1 {
+ iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+ return
+ }
+ return -int64(val)
+ }
+ val := iter.readUint64(c)
+ if val > math.MaxInt64 {
+ iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+ return
+ }
+ return int64(val)
+}
+
+// ReadUint64 read uint64
+func (iter *Iterator) ReadUint64() uint64 {
+ return iter.readUint64(iter.nextToken())
+}
+
+func (iter *Iterator) readUint64(c byte) (ret uint64) {
+ ind := intDigits[c]
+ if ind == 0 {
+ return 0 // single zero
+ }
+ if ind == invalidCharForNumber {
+ iter.ReportError("readUint64", "unexpected character: "+string([]byte{c}))
+ return
+ }
+ value := uint64(ind)
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ ind = intDigits[iter.buf[i]]
+ if ind == invalidCharForNumber {
+ iter.head = i
+ return value
+ }
+ if value > uint64SafeToMultiple10 {
+ value2 := (value << 3) + (value << 1) + uint64(ind)
+ if value2 < value {
+ iter.ReportError("readUint64", "overflow")
+ return
+ }
+ value = value2
+ continue
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ if !iter.loadMore() {
+ return value
+ }
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/feature_iter_object.go b/vendor/github.com/json-iterator/go/feature_iter_object.go
new file mode 100644
index 00000000..3bdb5576
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_iter_object.go
@@ -0,0 +1,212 @@
+package jsoniter
+
+import (
+ "fmt"
+ "unicode"
+ "unsafe"
+)
+
+// ReadObject read one field from object.
+// If object ended, returns empty string.
+// Otherwise, returns the field name.
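+// A typical consumption loop (editor's sketch, not upstream code):
+//
+//	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+//		iter.Skip() // or decode the value belonging to this field
+//	}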
+func (iter *Iterator) ReadObject() (ret string) {
+ c := iter.nextToken()
+ switch c {
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l')
+ return "" // null
+ case '{':
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ return string(iter.readObjectFieldAsBytes())
+ }
+ if c == '}' {
+ return "" // end of object
+ }
+ iter.ReportError("ReadObject", `expect " after {`)
+ return
+ case ',':
+ return string(iter.readObjectFieldAsBytes())
+ case '}':
+ return "" // end of object
+ default:
+ iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
+ return
+ }
+}
+
+func (iter *Iterator) readFieldHash() int32 {
+ hash := int64(0x811c9dc5)
+ c := iter.nextToken()
+ if c == '"' {
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ // require ascii string and no escape
+ b := iter.buf[i]
+ if 'A' <= b && b <= 'Z' {
+ b += 'a' - 'A'
+ }
+ if b == '"' {
+ iter.head = i + 1
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+ }
+ return int32(hash)
+ }
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ if !iter.loadMore() {
+ iter.ReportError("readFieldHash", `incomplete field name`)
+ return 0
+ }
+ }
+ }
+ iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
+ return 0
+}
+
+func calcHash(str string) int32 {
+ hash := int64(0x811c9dc5)
+ for _, b := range str {
+ hash ^= int64(unicode.ToLower(b))
+ hash *= 0x1000193
+ }
+ return int32(hash)
+}
+
+// ReadObjectCB read object with callback, the key is ascii only and field name not copied
+func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
+ c := iter.nextToken()
+ if c == '{' {
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field := iter.readObjectFieldAsBytes()
+ if !callback(iter, *(*string)(unsafe.Pointer(&field))) {
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ field = iter.readObjectFieldAsBytes()
+ if !callback(iter, *(*string)(unsafe.Pointer(&field))) {
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != '}' {
+ iter.ReportError("ReadObjectCB", `object not ended with }`)
+ return false
+ }
+ return true
+ }
+ if c == '}' {
+ return true
+ }
+ iter.ReportError("ReadObjectCB", `expect " after {`)
+ return false
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadObjectCB", `expect { or n`)
+ return false
+}
+
+// ReadMapCB read map with callback, the key can be any string
+func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
+ c := iter.nextToken()
+ if c == '{' {
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field := iter.ReadString()
+ if iter.nextToken() != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field")
+ return false
+ }
+ if !callback(iter, field) {
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ field = iter.ReadString()
+ if iter.nextToken() != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field")
+ return false
+ }
+ if !callback(iter, field) {
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != '}' {
+ iter.ReportError("ReadMapCB", `object not ended with }`)
+ return false
+ }
+ return true
+ }
+ if c == '}' {
+ return true
+ }
+ iter.ReportError("ReadMapCB", `expect " after {`)
+ return false
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadMapCB", `expect { or n`)
+ return false
+}
+
+func (iter *Iterator) readObjectStart() bool {
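+ // reports whether a non-empty object follows: both "{}" and null yield false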
+ c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n") + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field") + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip.go b/vendor/github.com/json-iterator/go/feature_iter_skip.go new file mode 100644 index 00000000..b008d98c --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip.go @@ -0,0 +1,127 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f") + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +type captureBuffer struct { + startedAt int + captured []byte +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = make([]byte, 0, 32) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + if len(captured) == 0 { + return remaining + } + captured = append(captured, remaining...) 
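+ // captured holds bytes accumulated across earlier buffer refills; remaining covers the part still in the current buffer window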
+ return captured
+}
+
+// Skip skips a json element and positions the iterator at the start of the next element
+func (iter *Iterator) Skip() {
+ c := iter.nextToken()
+ switch c {
+ case '"':
+ iter.skipString()
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ case 't':
+ iter.skipThreeBytes('r', 'u', 'e') // true
+ case 'f':
+ iter.skipFourBytes('a', 'l', 's', 'e') // false
+ case '0':
+ iter.unreadByte()
+ iter.ReadFloat32()
+ case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.skipNumber()
+ case '[':
+ iter.skipArray()
+ case '{':
+ iter.skipObject()
+ default:
+ iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
+ return
+ }
+}
+
+func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
+ if iter.readByte() != b1 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b2 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b3 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b4 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+}
+
+func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
+ if iter.readByte() != b1 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+ if iter.readByte() != b2 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+ if iter.readByte() != b3 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go
new file mode 100644
index 00000000..047d58a4
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go
@@ -0,0 +1,144 @@
+//+build jsoniter-sloppy
+
+package jsoniter
+
+// sloppy but faster implementation, do not validate the input json
+
+func (iter *Iterator) skipNumber() {
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\r', '\t', ',', '}', ']':
+ iter.head = i
+ return
+ }
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipArray() {
+ level := 1
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ switch iter.buf[i] {
+ case '"': // If inside string, skip it
+ iter.head = i + 1
+ iter.skipString()
+ i = iter.head - 1 // it will be i++ soon
+ case '[': // If open symbol, increase level
+ level++
+ case ']': // If close symbol, decrease level
+ level--
+
+ // If we have returned to the original level, we're done
+ if level == 0 {
+ iter.head = i + 1
+ return
+ }
+ }
+ }
+ if !iter.loadMore() {
+ iter.ReportError("skipArray", "incomplete array")
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipObject() {
+ level := 1
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ switch iter.buf[i] {
+ case '"': // If inside string, skip it
+ iter.head = i + 1
+ iter.skipString()
+ i = iter.head - 1 // it will be i++ soon
+ case '{': // If open symbol, increase level
+ level++
+ case '}': // If close symbol, decrease level
+ level--
+
+ // If we have returned to the original level, we're done
+ if level == 0 {
+ iter.head = i + 1
+ return
+ }
+ }
+ }
+ if !iter.loadMore() {
+ iter.ReportError("skipObject", "incomplete object")
+ return
+ }
+ }
+}
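+
+// Editor's note, not upstream code: the two skippers above pass over composites
+// by bracket-depth counting alone, stepping over strings so that brackets inside
+// them are ignored. The same technique as a standalone sketch:
+//
+//	// skipValue returns the index just past the array/object starting at buf[0].
+//	func skipValue(buf []byte) int {
+//		open, close := buf[0], byte(']')
+//		if open == '{' {
+//			close = '}'
+//		}
+//		level := 0
+//		for i := 0; i < len(buf); i++ {
+//			switch buf[i] {
+//			case '"': // step over the string, honoring \" escapes
+//				for i++; i < len(buf) && buf[i] != '"'; i++ {
+//					if buf[i] == '\\' {
+//						i++
+//					}
+//				}
+//			case open:
+//				level++
+//			case close:
+//				level--
+//				if level == 0 {
+//					return i + 1
+//				}
+//			}
+//		}
+//		return -1 // incomplete input
+//	}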
+ +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go new file mode 100644 index 00000000..d2676382 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go @@ -0,0 +1,89 @@ +//+build !jsoniter-sloppy + +package jsoniter + +import "fmt" + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + iter.ReadFloat32() + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_string.go 
b/vendor/github.com/json-iterator/go/feature_iter_string.go new file mode 100644 index 00000000..b7646004 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_string.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n`) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("ReadString", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("ReadString", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
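+// To retain the data, copy it first, e.g. kept := append([]byte(nil), s...).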
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
+ c := iter.nextToken()
+ if c == '"' {
+ for i := iter.head; i < iter.tail; i++ {
+ // require ascii string and no escape
+ // for: field name, base64, number
+ if iter.buf[i] == '"' {
+ // fast path: reuse the underlying buffer
+ ret = iter.buf[iter.head:i]
+ iter.head = i + 1
+ return ret
+ }
+ }
+ readLen := iter.tail - iter.head
+ copied := make([]byte, readLen, readLen*2)
+ copy(copied, iter.buf[iter.head:iter.tail])
+ iter.head = iter.tail
+ for iter.Error == nil {
+ c := iter.readByte()
+ if c == '"' {
+ return copied
+ }
+ copied = append(copied, c)
+ }
+ return copied
+ }
+ iter.ReportError("ReadStringAsSlice", `expects "`)
+ return
+}
+
+func (iter *Iterator) readU4() (ret rune) {
+ for i := 0; i < 4; i++ {
+ c := iter.readByte()
+ if iter.Error != nil {
+ return
+ }
+ if c >= '0' && c <= '9' {
+ ret = ret*16 + rune(c-'0')
+ } else if c >= 'a' && c <= 'f' {
+ ret = ret*16 + rune(c-'a'+10)
+ } else if c >= 'A' && c <= 'F' {
+ ret = ret*16 + rune(c-'A'+10)
+ } else {
+ iter.ReportError("readU4", "expects 0~9, a~f or A~F")
+ return
+ }
+ }
+ return ret
+}
+
+const (
+ t1 = 0x00 // 0000 0000
+ tx = 0x80 // 1000 0000
+ t2 = 0xC0 // 1100 0000
+ t3 = 0xE0 // 1110 0000
+ t4 = 0xF0 // 1111 0000
+ t5 = 0xF8 // 1111 1000
+
+ maskx = 0x3F // 0011 1111
+ mask2 = 0x1F // 0001 1111
+ mask3 = 0x0F // 0000 1111
+ mask4 = 0x07 // 0000 0111
+
+ rune1Max = 1<<7 - 1
+ rune2Max = 1<<11 - 1
+ rune3Max = 1<<16 - 1
+
+ surrogateMin = 0xD800
+ surrogateMax = 0xDFFF
+
+ maxRune = '\U0010FFFF' // Maximum valid Unicode code point.
+ runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
+)
+
+func appendRune(p []byte, r rune) []byte {
+ // Negative values are erroneous. Making it unsigned addresses the problem.
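+ // The switch below hand-rolls unicode/utf8 encoding: 1-4 bytes depending on
+ // the rune's range, with surrogates and out-of-range values mapped to U+FFFD.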
+ switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/feature_json_number.go b/vendor/github.com/json-iterator/go/feature_json_number.go new file mode 100644 index 00000000..0439f672 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_json_number.go @@ -0,0 +1,15 @@ +package jsoniter + +import "encoding/json" + +type Number string + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} diff --git a/vendor/github.com/json-iterator/go/feature_pool.go b/vendor/github.com/json-iterator/go/feature_pool.go new file mode 100644 index 00000000..73962bc6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_pool.go @@ -0,0 +1,57 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + select { + case stream := <-cfg.streamPool: + stream.Reset(writer) + return stream + default: + return NewStream(cfg, writer, 512) + } +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.Error = nil + select { + case cfg.streamPool <- stream: + return + default: + return + } +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + select { + case iter := <-cfg.iteratorPool: + iter.ResetBytes(data) + return iter + default: + return ParseBytes(cfg, data) + } +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + select { + case cfg.iteratorPool <- iter: + return + default: + return + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect.go b/vendor/github.com/json-iterator/go/feature_reflect.go new file mode 100644 index 00000000..05d91b49 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect.go @@ -0,0 +1,691 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "fmt" + "reflect" + "time" + "unsafe" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. 
assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). +type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) + EncodeInterface(val interface{}, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +// WriteToStream the default implementation for TypeEncoder method EncodeInterface +func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) { + e := (*emptyInterface)(unsafe.Pointer(&val)) + if e.word == nil { + stream.WriteNil() + return + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +var jsonNumberType reflect.Type +var jsoniterNumberType reflect.Type +var jsonRawMessageType reflect.Type +var jsoniterRawMessageType reflect.Type +var anyType reflect.Type +var marshalerType reflect.Type +var unmarshalerType reflect.Type +var textMarshalerType reflect.Type +var textUnmarshalerType reflect.Type + +func init() { + jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem() + jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem() + jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem() + jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem() + anyType = reflect.TypeOf((*Any)(nil)).Elem() + marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +} + +type optionalDecoder struct { + valueType reflect.Type + valueDecoder ValDecoder +} + +func (decoder *optionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + value := reflect.New(decoder.valueType) + newPtr := extractInterface(value.Interface()).word + decoder.valueDecoder.Decode(newPtr, iter) + *((*uintptr)(ptr)) = uintptr(newPtr) + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type deferenceDecoder struct { + // only to deference a pointer + valueType reflect.Type + valueDecoder ValDecoder +} + +func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + value := reflect.New(decoder.valueType) + newPtr := extractInterface(value.Interface()).word + decoder.valueDecoder.Decode(newPtr, iter) + *((*uintptr)(ptr)) = uintptr(newPtr) + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type optionalEncoder struct { + valueEncoder ValEncoder +} + +func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *optionalEncoder) 
EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *optionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if *((*unsafe.Pointer)(ptr)) == nil { + return true + } + return false +} + +type placeholderEncoder struct { + cfg *frozenConfig + cacheKey reflect.Type +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.getRealEncoder().Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.getRealEncoder().IsEmpty(ptr) +} + +func (encoder *placeholderEncoder) getRealEncoder() ValEncoder { + for i := 0; i < 30; i++ { + realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey) + _, isPlaceholder := realDecoder.(*placeholderEncoder) + if isPlaceholder { + time.Sleep(time.Second) + } else { + return realDecoder + } + } + panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey)) +} + +type placeholderDecoder struct { + cfg *frozenConfig + cacheKey reflect.Type +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + for i := 0; i < 30; i++ { + realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey) + _, isPlaceholder := realDecoder.(*placeholderDecoder) + if isPlaceholder { + time.Sleep(time.Second) + } else { + realDecoder.Decode(ptr, iter) + return + } + } + panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey)) +} + +// emptyInterface is the header for an interface{} value. +type emptyInterface struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +// emptyInterface is the header for an interface with method (not interface{}) +type nonEmptyInterface struct { + // see ../runtime/iface.go:/Itab + itab *struct { + ityp unsafe.Pointer // static interface type + typ unsafe.Pointer // dynamic concrete type + link unsafe.Pointer + bad int32 + unused int32 + fun [100000]unsafe.Pointer // method table + } + word unsafe.Pointer +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + typ := reflect.TypeOf(obj) + cacheKey := typ.Elem() + decoder, err := decoderOfType(iter.cfg, cacheKey) + if err != nil { + iter.Error = err + return + } + e := (*emptyInterface)(unsafe.Pointer(&obj)) + decoder.Decode(e.word, iter) +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + typ := reflect.TypeOf(val) + cacheKey := typ + encoder, err := encoderOfType(stream.cfg, cacheKey) + if err != nil { + stream.Error = err + return + } + encoder.EncodeInterface(val, stream) +} + +type prefix string + +func (p prefix) addToDecoder(decoder ValDecoder, err error) (ValDecoder, error) { + if err != nil { + return nil, fmt.Errorf("%s: %s", p, err.Error()) + } + return decoder, err +} + +func (p prefix) addToEncoder(encoder ValEncoder, err error) (ValEncoder, error) { + if err != nil { + return nil, fmt.Errorf("%s: %s", p, err.Error()) + } + return encoder, err +} + +func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + cacheKey := typ + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder, nil + } + decoder = getTypeDecoderFromExtension(typ) + if decoder != nil { + 
cfg.addDecoderToCache(cacheKey, decoder) + return decoder, nil + } + decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey} + cfg.addDecoderToCache(cacheKey, decoder) + decoder, err := createDecoderOfType(cfg, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + cfg.addDecoderToCache(cacheKey, decoder) + return decoder, err +} + +func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + typeName := typ.String() + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{}, nil + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{}, nil + } + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{}, nil + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{}, nil + } + if typ.Implements(unmarshalerType) { + templateInterface := reflect.New(typ).Elem().Interface() + var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} + if typ.Kind() == reflect.Ptr { + decoder = &optionalDecoder{typ.Elem(), decoder} + } + return decoder, nil + } + if reflect.PtrTo(typ).Implements(unmarshalerType) { + templateInterface := reflect.New(typ).Interface() + var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} + return decoder, nil + } + if typ.Implements(textUnmarshalerType) { + templateInterface := reflect.New(typ).Elem().Interface() + var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} + if typ.Kind() == reflect.Ptr { + decoder = &optionalDecoder{typ.Elem(), decoder} + } + return decoder, nil + } + if reflect.PtrTo(typ).Implements(textUnmarshalerType) { + templateInterface := reflect.New(typ).Interface() + var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} + return decoder, nil + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + sliceDecoder, err := prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) + if err != nil { + return nil, err + } + return &base64Codec{sliceDecoder: sliceDecoder}, nil + } + if typ.Implements(anyType) { + return &anyCodec{}, nil + } + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) + } + return &stringCodec{}, nil + case reflect.Int: + if typeName != "int" { + return decoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) + } + return &intCodec{}, nil + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) + } + return &int8Codec{}, nil + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) + } + return &int16Codec{}, nil + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) + } + return &int32Codec{}, nil + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) + } + return &int64Codec{}, nil + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) + } + return &uintCodec{}, nil + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) + } + return &uint8Codec{}, nil + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) + } + return &uint16Codec{}, nil + case reflect.Uint32: + if typeName != "uint32" 
{ + return decoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) + } + return &uint32Codec{}, nil + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) + } + return &uintptrCodec{}, nil + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) + } + return &uint64Codec{}, nil + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) + } + return &float32Codec{}, nil + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) + } + return &float64Codec{}, nil + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) + } + return &boolCodec{}, nil + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{}, nil + } + return &nonEmptyInterfaceCodec{}, nil + case reflect.Struct: + return prefix(fmt.Sprintf("[%s]", typeName)).addToDecoder(decoderOfStruct(cfg, typ)) + case reflect.Array: + return prefix("[array]").addToDecoder(decoderOfArray(cfg, typ)) + case reflect.Slice: + return prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) + case reflect.Map: + return prefix("[map]").addToDecoder(decoderOfMap(cfg, typ)) + case reflect.Ptr: + return prefix("[optional]").addToDecoder(decoderOfOptional(cfg, typ)) + default: + return nil, fmt.Errorf("unsupported type: %v", typ) + } +} + +func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + cacheKey := typ + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder, nil + } + encoder = getTypeEncoderFromExtension(typ) + if encoder != nil { + cfg.addEncoderToCache(cacheKey, encoder) + return encoder, nil + } + encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey} + cfg.addEncoderToCache(cacheKey, encoder) + encoder, err := createEncoderOfType(cfg, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder, err +} + +func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{}, nil + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{}, nil + } + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{}, nil + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{}, nil + } + if typ.Implements(marshalerType) { + checkIsEmpty, err := createCheckIsEmpty(typ) + if err != nil { + return nil, err + } + templateInterface := reflect.New(typ).Elem().Interface() + var encoder ValEncoder = &marshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + if typ.Kind() == reflect.Ptr { + encoder = &optionalEncoder{encoder} + } + return encoder, nil + } + if typ.Implements(textMarshalerType) { + checkIsEmpty, err := createCheckIsEmpty(typ) + if err != nil { + return nil, err + } + templateInterface := reflect.New(typ).Elem().Interface() + var encoder ValEncoder = &textMarshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + if typ.Kind() == reflect.Ptr { + encoder = &optionalEncoder{encoder} + } + return encoder, nil + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + return &base64Codec{}, nil + } + if 
typ.Implements(anyType) { + return &anyCodec{}, nil + } + return createEncoderOfSimpleType(cfg, typ) +} + +func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) { + kind := typ.Kind() + switch kind { + case reflect.String: + return &stringCodec{}, nil + case reflect.Int: + return &intCodec{}, nil + case reflect.Int8: + return &int8Codec{}, nil + case reflect.Int16: + return &int16Codec{}, nil + case reflect.Int32: + return &int32Codec{}, nil + case reflect.Int64: + return &int64Codec{}, nil + case reflect.Uint: + return &uintCodec{}, nil + case reflect.Uint8: + return &uint8Codec{}, nil + case reflect.Uint16: + return &uint16Codec{}, nil + case reflect.Uint32: + return &uint32Codec{}, nil + case reflect.Uintptr: + return &uintptrCodec{}, nil + case reflect.Uint64: + return &uint64Codec{}, nil + case reflect.Float32: + return &float32Codec{}, nil + case reflect.Float64: + return &float64Codec{}, nil + case reflect.Bool: + return &boolCodec{}, nil + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{}, nil + } + return &nonEmptyInterfaceCodec{}, nil + case reflect.Struct: + return &structEncoder{}, nil + case reflect.Array: + return &arrayEncoder{}, nil + case reflect.Slice: + return &sliceEncoder{}, nil + case reflect.Map: + return &mapEncoder{}, nil + case reflect.Ptr: + return &optionalEncoder{}, nil + default: + return nil, fmt.Errorf("unsupported type: %v", typ) + } +} + +func createEncoderOfSimpleType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) + } + return &stringCodec{}, nil + case reflect.Int: + if typeName != "int" { + return encoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) + } + return &intCodec{}, nil + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) + } + return &int8Codec{}, nil + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) + } + return &int16Codec{}, nil + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) + } + return &int32Codec{}, nil + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) + } + return &int64Codec{}, nil + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) + } + return &uintCodec{}, nil + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) + } + return &uint8Codec{}, nil + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) + } + return &uint16Codec{}, nil + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) + } + return &uint32Codec{}, nil + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) + } + return &uintptrCodec{}, nil + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) + } + return &uint64Codec{}, nil + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) + } + return &float32Codec{}, nil + case reflect.Float64: + if 
typeName != "float64" { + return encoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) + } + return &float64Codec{}, nil + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) + } + return &boolCodec{}, nil + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{}, nil + } + return &nonEmptyInterfaceCodec{}, nil + case reflect.Struct: + return prefix(fmt.Sprintf("[%s]", typeName)).addToEncoder(encoderOfStruct(cfg, typ)) + case reflect.Array: + return prefix("[array]").addToEncoder(encoderOfArray(cfg, typ)) + case reflect.Slice: + return prefix("[slice]").addToEncoder(encoderOfSlice(cfg, typ)) + case reflect.Map: + return prefix("[map]").addToEncoder(encoderOfMap(cfg, typ)) + case reflect.Ptr: + return prefix("[optional]").addToEncoder(encoderOfOptional(cfg, typ)) + default: + return nil, fmt.Errorf("unsupported type: %v", typ) + } +} + +func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + elemType := typ.Elem() + decoder, err := decoderOfType(cfg, elemType) + if err != nil { + return nil, err + } + return &optionalDecoder{elemType, decoder}, nil +} + +func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + elemType := typ.Elem() + elemEncoder, err := encoderOfType(cfg, elemType) + if err != nil { + return nil, err + } + encoder := &optionalEncoder{elemEncoder} + if elemType.Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + return encoder, nil +} + +func decoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + decoder, err := decoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + mapInterface := reflect.New(typ).Interface() + return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)}, nil +} + +func extractInterface(val interface{}) emptyInterface { + return *((*emptyInterface)(unsafe.Pointer(&val))) +} + +func encoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + elemType := typ.Elem() + encoder, err := encoderOfType(cfg, elemType) + if err != nil { + return nil, err + } + mapInterface := reflect.New(typ).Elem().Interface() + if cfg.sortMapKeys { + return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil + } + return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_array.go b/vendor/github.com/json-iterator/go/feature_reflect_array.go new file mode 100644 index 00000000..e23f187b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_array.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +func decoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + decoder, err := decoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + return &arrayDecoder{typ, typ.Elem(), decoder}, nil +} + +func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + encoder, err := encoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + if typ.Elem().Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + return &arrayEncoder{typ, typ.Elem(), encoder}, nil +} + +type arrayEncoder struct { + arrayType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + 
elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) + encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) { + // special optimization for interface{} + e := (*emptyInterface)(unsafe.Pointer(&val)) + if e.word == nil { + stream.WriteArrayStart() + stream.WriteNil() + stream.WriteArrayEnd() + return + } + elemType := encoder.arrayType.Elem() + if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) { + ptr := uintptr(e.word) + e.word = unsafe.Pointer(&ptr) + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + offset := uintptr(0) + iter.ReadArrayCB(func(iter *Iterator) bool { + if offset < decoder.arrayType.Size() { + decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter) + offset += decoder.elemType.Size() + } else { + iter.Skip() + } + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_extension.go b/vendor/github.com/json-iterator/go/feature_reflect_extension.go new file mode 100644 index 00000000..3dd38299 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_extension.go @@ -0,0 +1,413 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + onePtrEmbedded bool + onePtrOptimization bool + Type reflect.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field *reflect.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
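+// Implementations usually embed DummyExtension (below) and override only the
+// hooks they need, e.g. (editor's sketch, not upstream code):
+//
+//	type lowerCaser struct{ jsoniter.DummyExtension }
+//
+//	func (e *lowerCaser) UpdateStructDescriptor(sd *jsoniter.StructDescriptor) {
+//		for _, binding := range sd.Fields {
+//			binding.ToNames = []string{strings.ToLower(binding.Field.Name)}
+//		}
+//	}
+//
+//	// registered once at startup: jsoniter.RegisterExtension(&lowerCaser{})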
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateDecoder(typ reflect.Type) ValDecoder + CreateEncoder(typ reflect.Type) ValEncoder + DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + 
fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder := typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + decoder := typeDecoders[typ.Elem().String()] + if decoder != nil { + return &optionalDecoder{typ.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder := typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + encoder := typeEncoders[typ.Elem().String()] + if encoder != nil { + return &optionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) { + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + tag := field.Tag.Get(cfg.getTagKey()) + tagParts := strings.Split(tag, ",") + if tag == "-" { + continue + } + if field.Anonymous && (tag == "" || tagParts[0] == "") { + if field.Type.Kind() == reflect.Struct { + structDescriptor, err := describeStruct(cfg, field.Type) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { + structDescriptor, err := describeStruct(cfg, field.Type.Elem()) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
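+ // levels now leads with the embedding index; sortableBindings later uses it to restore source declaration order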
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &optionalEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} + binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + fieldNames := calcFieldNames(field.Name, tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + var err error + decoder, err = decoderOfType(cfg, field.Type) + if err != nil { + return nil, err + } + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + var err error + encoder, err = encoderOfType(cfg, field.Type) + if err != nil { + return nil, err + } + // map is stored as pointer in the struct + if field.Type.Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + } + binding := &Binding{ + Field: &field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(cfg, typ, bindings, embeddedBindings), nil +} +func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + onePtrEmbedded := false + onePtrOptimization := false + if typ.NumField() == 1 { + firstField := typ.Field(0) + switch firstField.Type.Kind() { + case reflect.Ptr: + if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct { + onePtrEmbedded = true + } + fallthrough + case reflect.Map: + onePtrOptimization = true + case reflect.Struct: + onePtrOptimization = isStructOnePtr(firstField.Type) + } + } + structDescriptor := &StructDescriptor{ + onePtrEmbedded: onePtrEmbedded, + onePtrOptimization: onePtrOptimization, + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, cfg) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +func isStructOnePtr(typ reflect.Type) bool { + if typ.NumField() == 1 { + firstField := typ.Field(0) + switch firstField.Type.Kind() { + case reflect.Ptr: + return true + case reflect.Map: + return true + case reflect.Struct: + return isStructOnePtr(firstField.Type) + } + } + return false +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type.Kind() == reflect.String { + 
binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? + var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_map.go b/vendor/github.com/json-iterator/go/feature_reflect_map.go new file mode 100644 index 00000000..005671e0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_map.go @@ -0,0 +1,244 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "reflect" + "sort" + "strconv" + "unsafe" +) + +type mapDecoder struct { + mapType reflect.Type + keyType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder + mapInterface emptyInterface +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + // dark magic to cast unsafe.Pointer back to interface{} using reflect.Type + mapInterface := decoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface).Elem() + if iter.ReadNil() { + realVal.Set(reflect.Zero(decoder.mapType)) + return + } + if realVal.IsNil() { + realVal.Set(reflect.MakeMap(realVal.Type())) + } + iter.ReadMapCB(func(iter *Iterator, keyStr string) bool { + elem := reflect.New(decoder.elemType) + decoder.elemDecoder.Decode(unsafe.Pointer(elem.Pointer()), iter) + // to put into map, we have to use reflection + keyType := decoder.keyType + // TODO: remove this from loop + switch { + case keyType.Kind() == reflect.String: + realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem()) + return true + case keyType.Implements(textUnmarshalerType): + textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler) + err := textUnmarshaler.UnmarshalText([]byte(keyStr)) + if err != nil { + iter.ReportError("read map key as TextUnmarshaler", err.Error()) + return false + } + realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem()) + return true + case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler) + err := textUnmarshaler.UnmarshalText([]byte(keyStr)) + if err != nil { + iter.ReportError("read map key as TextUnmarshaler", err.Error()) + return false + } + realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem()) + return true + default: + switch keyType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(keyStr, 10, 64) + if err != nil || reflect.Zero(keyType).OverflowInt(n) { + iter.ReportError("read map key as int64", "read int64 failed") + return false + } + realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) + return true + case 
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(keyStr, 10, 64) + if err != nil || reflect.Zero(keyType).OverflowUint(n) { + iter.ReportError("read map key as uint64", "read uint64 failed") + return false + } + realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) + return true + } + } + iter.ReportError("read map key", "unexpected map key type "+keyType.String()) + return true + }) +} + +type mapEncoder struct { + mapType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder + mapInterface emptyInterface +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + stream.WriteObjectStart() + for i, key := range realVal.MapKeys() { + if i != 0 { + stream.WriteMore() + } + encodeMapKey(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + val := realVal.MapIndex(key).Interface() + encoder.elemEncoder.EncodeInterface(val, stream) + } + stream.WriteObjectEnd() +} + +func encodeMapKey(key reflect.Value, stream *Stream) { + if key.Kind() == reflect.String { + stream.WriteString(key.String()) + return + } + if tm, ok := key.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + if err != nil { + stream.Error = err + return + } + stream.writeByte('"') + stream.Write(buf) + stream.writeByte('"') + return + } + switch key.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + stream.writeByte('"') + stream.WriteInt64(key.Int()) + stream.writeByte('"') + return + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + stream.writeByte('"') + stream.WriteUint64(key.Uint()) + stream.writeByte('"') + return + } + stream.Error = &json.UnsupportedTypeError{Type: key.Type()} +} + +func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + return realVal.Len() == 0 +} + +type sortKeysMapEncoder struct { + mapType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder + mapInterface emptyInterface +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + + // Extract and sort the keys. 
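+	// Each key is resolved to its string form first (strings as-is, integer
+	// kinds via strconv, encoding.TextMarshaler when implemented) and the
+	// result is sorted lexicographically, so the output is deterministic,
+	// matching encoding/json.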
+ keys := realVal.MapKeys() + sv := stringValues(make([]reflectWithString, len(keys))) + for i, v := range keys { + sv[i].v = v + if err := sv[i].resolve(); err != nil { + stream.Error = err + return + } + } + sort.Sort(sv) + + stream.WriteObjectStart() + for i, key := range sv { + if i != 0 { + stream.WriteMore() + } + stream.WriteVal(key.s) // might need html escape, so can not WriteString directly + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + val := realVal.MapIndex(key.v).Interface() + encoder.elemEncoder.EncodeInterface(val, stream) + } + stream.WriteObjectEnd() +} + +// stringValues is a slice of reflect.Value holding *reflect.StringValue. +// It implements the methods to sort by string. +type stringValues []reflectWithString + +type reflectWithString struct { + v reflect.Value + s string +} + +func (w *reflectWithString) resolve() error { + if w.v.Kind() == reflect.String { + w.s = w.v.String() + return nil + } + if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + w.s = string(buf) + return err + } + switch w.v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + w.s = strconv.FormatInt(w.v.Int(), 10) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + w.s = strconv.FormatUint(w.v.Uint(), 10) + return nil + } + return &json.UnsupportedTypeError{Type: w.v.Type()} +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s } + +func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + return realVal.Len() == 0 +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_native.go b/vendor/github.com/json-iterator/go/feature_reflect_native.go new file mode 100644 index 00000000..b37dab3d --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_native.go @@ -0,0 +1,672 @@ +package jsoniter + +import ( + "encoding" + "encoding/base64" + "encoding/json" + "unsafe" +) + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type intCodec struct { +} + +func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int)(ptr)) = iter.ReadInt() +} + +func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt(*((*int)(ptr))) +} + +func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int)(ptr)) == 0 +} + +type uintptrCodec struct { +} + +func (codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + 
*((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) +} + +func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(uint64(*((*uintptr)(ptr)))) +} + +func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uintptr)(ptr)) == 0 +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int8)(ptr)) = iter.ReadInt8() +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int16)(ptr)) = iter.ReadInt16() +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int32)(ptr)) = iter.ReadInt32() +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int64)(ptr)) = iter.ReadInt64() +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uintCodec struct { +} + +func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint)(ptr)) = iter.ReadUint() +} + +func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint(*((*uint)(ptr))) +} + +func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint8)(ptr)) = iter.ReadUint8() +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint16)(ptr)) = iter.ReadUint16() +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) EncodeInterface(val interface{}, 
stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint32)(ptr)) = iter.ReadUint32() +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint64)(ptr)) = iter.ReadUint64() +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*float32)(ptr)) = iter.ReadFloat32() +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*float64)(ptr)) = iter.ReadFloat64() +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*bool)(ptr)) = iter.ReadBool() +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type emptyInterfaceCodec struct { +} + +func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*interface{})(ptr)) = iter.Read() +} + +func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteVal(*((*interface{})(ptr))) +} + +func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteVal(val) +} + +func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { + return ptr == nil +} + +type nonEmptyInterfaceCodec struct { +} + +func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + if nonEmptyInterface.itab == nil { + iter.ReportError("read non-empty interface", "do not know which concrete type to decode to") + return + } + var i interface{} + e := (*emptyInterface)(unsafe.Pointer(&i)) + e.typ = nonEmptyInterface.itab.typ + e.word = nonEmptyInterface.word + iter.ReadVal(&i) + 
nonEmptyInterface.word = e.word +} + +func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + var i interface{} + e := (*emptyInterface)(unsafe.Pointer(&i)) + e.typ = nonEmptyInterface.itab.typ + e.word = nonEmptyInterface.word + stream.WriteVal(i) +} + +func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteVal(val) +} + +func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + return nonEmptyInterface.word == nil +} + +type anyCodec struct { +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*Any)(ptr)) = iter.ReadAny() +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + (*((*Any)(ptr))).WriteTo(stream) +} + +func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) { + (val.(Any)).WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + return (*((*Any)(ptr))).Size() == 0 +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.Number)(ptr)))) +} + +func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(json.Number))) +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*Number)(ptr)))) +} + +func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(Number))) +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(json.RawMessage))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(RawMessage))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} + +type base64Codec struct { + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + 
ptrSlice := (*sliceHeader)(ptr) + ptrSlice.Len = 0 + ptrSlice.Cap = 0 + ptrSlice.Data = nil + return + } + switch iter.WhatIsNext() { + case StringValue: + encoding := base64.StdEncoding + src := iter.SkipAndReturnBytes() + src = src[1 : len(src)-1] + decodedLen := encoding.DecodedLen(len(src)) + dst := make([]byte, decodedLen) + len, err := encoding.Decode(dst, src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + dst = dst[:len] + dstSlice := (*sliceHeader)(unsafe.Pointer(&dst)) + ptrSlice := (*sliceHeader)(ptr) + ptrSlice.Data = dstSlice.Data + ptrSlice.Cap = dstSlice.Cap + ptrSlice.Len = dstSlice.Len + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + toGrow := encoding.EncodedLen(len(src)) + stream.ensure(toGrow) + encoding.Encode(stream.buf[stream.n:], src) + stream.n += toGrow + stream.writeByte('"') +} + +func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) { + ptr := extractInterface(val).word + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + toGrow := encoding.EncodedLen(len(src)) + stream.ensure(toGrow) + encoding.Encode(stream.buf[stream.n:], src) + stream.n += toGrow + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect "`) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect "`) + return + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder 
*stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type marshalerEncoder struct { + templateInterface emptyInterface + checkIsEmpty checkIsEmpty +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + templateInterface := encoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + marshaler := (*realInterface).(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} +func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + templateInterface emptyInterface + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + templateInterface := encoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + marshaler := (*realInterface).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + stream.WriteString(string(bytes)) + } +} + +func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + templateInterface emptyInterface +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + templateInterface := decoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + unmarshaler := (*realInterface).(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + templateInterface emptyInterface +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + templateInterface := decoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + unmarshaler := (*realInterface).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_object.go b/vendor/github.com/json-iterator/go/feature_reflect_object.go new file mode 100644 index 00000000..59b1235c --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_object.go @@ -0,0 +1,196 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "strings" + "unsafe" +) + +func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor, err := describeStruct(cfg, typ) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + 
for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{}, nil + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{structDescriptor.onePtrEmbedded, structDescriptor.onePtrOptimization, finalOrderedFields}, nil +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag.Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +func decoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + bindings := map[string]*Binding{} + structDescriptor, err := describeStruct(cfg, typ) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + return createStructDecoder(typ, fields) +} + +type structFieldEncoder struct { + field *reflect.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +type structEncoder struct { + onePtrEmbedded bool + onePtrOptimization bool + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() +} + +func (encoder 
*structEncoder) EncodeInterface(val interface{}, stream *Stream) { + e := (*emptyInterface)(unsafe.Pointer(&val)) + if encoder.onePtrOptimization { + if e.word == nil && encoder.onePtrEmbedded { + stream.WriteObjectStart() + stream.WriteObjectEnd() + return + } + ptr := uintptr(e.word) + e.word = unsafe.Pointer(&ptr) + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_slice.go b/vendor/github.com/json-iterator/go/feature_reflect_slice.go new file mode 100644 index 00000000..7377eec7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_slice.go @@ -0,0 +1,149 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +func decoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + decoder, err := decoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + return &sliceDecoder{typ, typ.Elem(), decoder}, nil +} + +func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + encoder, err := encoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + if typ.Elem().Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + return &sliceEncoder{typ, typ.Elem(), encoder}, nil +} + +type sliceEncoder struct { + sliceType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + slice := (*sliceHeader)(ptr) + if slice.Data == nil { + stream.WriteNil() + return + } + if slice.Len == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(slice.Data) + encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) + for i := 1; i < slice.Len; i++ { + stream.WriteMore() + elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) + encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + slice := (*sliceHeader)(ptr) + return slice.Len == 0 +} + +type sliceDecoder struct { + sliceType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder +} + +// sliceHeader is a safe version of SliceHeader used within this package. 
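+// Unlike reflect.SliceHeader, Data is an unsafe.Pointer rather than a
+// uintptr, so the garbage collector keeps the backing array reachable.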
+type sliceHeader struct {
+	Data unsafe.Pointer
+	Len  int
+	Cap  int
+}
+
+func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.doDecode(ptr, iter)
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
+	}
+}
+
+func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+	slice := (*sliceHeader)(ptr)
+	if iter.ReadNil() {
+		slice.Len = 0
+		slice.Cap = 0
+		slice.Data = nil
+		return
+	}
+	reuseSlice(slice, decoder.sliceType, 4)
+	slice.Len = 0
+	offset := uintptr(0)
+	iter.ReadArrayCB(func(iter *Iterator) bool {
+		growOne(slice, decoder.sliceType, decoder.elemType)
+		decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter)
+		offset += decoder.elemType.Size()
+		return true
+	})
+}
+
+// growOne grows the slice so that it can hold one more value, allocating
+// more capacity if needed.
+func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) {
+	newLen := slice.Len + 1
+	if newLen <= slice.Cap {
+		slice.Len = newLen
+		return
+	}
+	newCap := slice.Cap
+	if newCap == 0 {
+		newCap = 1
+	} else {
+		for newCap < newLen {
+			if slice.Len < 1024 {
+				newCap += newCap
+			} else {
+				newCap += newCap / 4
+			}
+		}
+	}
+	newVal := reflect.MakeSlice(sliceType, newLen, newCap)
+	dst := unsafe.Pointer(newVal.Pointer())
+	// copy old array into new array
+	originalBytesCount := uintptr(slice.Len) * elementType.Size()
+	srcPtr := (*[1 << 30]byte)(slice.Data)
+	dstPtr := (*[1 << 30]byte)(dst)
+	for i := uintptr(0); i < originalBytesCount; i++ {
+		dstPtr[i] = srcPtr[i]
+	}
+	slice.Data = dst
+	slice.Len = newLen
+	slice.Cap = newCap
+}
+
+func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) {
+	if expectedCap <= slice.Cap {
+		return
+	}
+	newVal := reflect.MakeSlice(sliceType, 0, expectedCap)
+	dst := unsafe.Pointer(newVal.Pointer())
+	slice.Data = dst
+	slice.Cap = expectedCap
+}
diff --git a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go
new file mode 100644
index 00000000..b3417fd7
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go
@@ -0,0 +1,916 @@
+package jsoniter
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"unsafe"
+)
+
+func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) (ValDecoder, error) {
+	knownHash := map[int32]struct{}{
+		0: {},
+	}
+	switch len(fields) {
+	case 0:
+		return &skipObjectDecoder{typ}, nil
+	case 1:
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName)
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields}, nil
+			}
+			knownHash[fieldHash] = struct{}{}
+			return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}, nil
+		}
+	case 2:
+		var fieldHash1 int32
+		var fieldHash2 int32
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName)
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields}, nil
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldHash1 == 0 {
+				fieldHash1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else {
+				fieldHash2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			}
+		}
+		return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}, nil
+	case 
3: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3}, nil + case 4: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4}, nil + case 5: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5}, nil + case 6: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + 
fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6}, nil + case 7: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7}, nil + case 8: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } + 
} + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, fieldName8, fieldDecoder8}, nil + case 9: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldName9 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9}, nil + case 10: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldName9 int32 + var fieldName10 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + 
fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10}, nil + } + return &generalStructDecoder{typ, fields}, nil +} + +type generalStructDecoder struct { + typ reflect.Type + fields map[string]*structFieldDecoder +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + fieldBytes := iter.readObjectFieldAsBytes() + field := *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder := decoder.fields[strings.ToLower(field)] + if fieldDecoder == nil { + iter.Skip() + } else { + fieldDecoder.Decode(ptr, iter) + } + for iter.nextToken() == ',' { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[strings.ToLower(field)] + if fieldDecoder == nil { + iter.Skip() + } else { + fieldDecoder.Decode(ptr, iter) + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type skipObjectDecoder struct { + typ reflect.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect.Type + fieldHash int32 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type twoFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type threeFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case 
decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type fourFieldsStructDecoder struct {
+	typ           reflect.Type
+	fieldHash1    int32
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int32
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int32
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int32
+	fieldDecoder4 *structFieldDecoder
+}
+
+func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type fiveFieldsStructDecoder struct {
+	typ           reflect.Type
+	fieldHash1    int32
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int32
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int32
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int32
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int32
+	fieldDecoder5 *structFieldDecoder
+}
+
+func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type sixFieldsStructDecoder struct {
+	typ           reflect.Type
+	fieldHash1    int32
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int32
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int32
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int32
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int32
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int32
+	fieldDecoder6 *structFieldDecoder
+}
+
+func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type sevenFieldsStructDecoder struct {
+	typ           reflect.Type
+	fieldHash1    int32
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int32
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int32
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int32
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int32
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int32
+	fieldDecoder6 *structFieldDecoder
+	fieldHash7    int32
+	fieldDecoder7 *structFieldDecoder
+}
+
+func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type eightFieldsStructDecoder struct {
+	typ           reflect.Type
+	fieldHash1    int32
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int32
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int32
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int32
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int32
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int32
+	fieldDecoder6 *structFieldDecoder
+	fieldHash7    int32
+	fieldDecoder7 *structFieldDecoder
+	fieldHash8    int32
+	fieldDecoder8 *structFieldDecoder
+}
+
+func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		case decoder.fieldHash8:
+			decoder.fieldDecoder8.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type nineFieldsStructDecoder struct {
+	typ           reflect.Type
+	fieldHash1    int32
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int32
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int32
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int32
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int32
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int32
+	fieldDecoder6 *structFieldDecoder
+	fieldHash7    int32
+	fieldDecoder7 *structFieldDecoder
+	fieldHash8    int32
+	fieldDecoder8 *structFieldDecoder
+	fieldHash9    int32
+	fieldDecoder9 *structFieldDecoder
+}
+
+func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		case decoder.fieldHash8:
+			decoder.fieldDecoder8.Decode(ptr, iter)
+		case decoder.fieldHash9:
+			decoder.fieldDecoder9.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type tenFieldsStructDecoder struct {
+	typ            reflect.Type
+	fieldHash1     int32
+	fieldDecoder1  *structFieldDecoder
+	fieldHash2     int32
+	fieldDecoder2  *structFieldDecoder
+	fieldHash3     int32
+	fieldDecoder3  *structFieldDecoder
+	fieldHash4     int32
+	fieldDecoder4  *structFieldDecoder
+	fieldHash5     int32
+	fieldDecoder5  *structFieldDecoder
+	fieldHash6     int32
+	fieldDecoder6  *structFieldDecoder
+	fieldHash7     int32
+	fieldDecoder7  *structFieldDecoder
+	fieldHash8     int32
+	fieldDecoder8  *structFieldDecoder
+	fieldHash9     int32
+	fieldDecoder9  *structFieldDecoder
+	fieldHash10    int32
+	fieldDecoder10 *structFieldDecoder
+}
+
+func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		case decoder.fieldHash8:
+			decoder.fieldDecoder8.Decode(ptr, iter)
+		case decoder.fieldHash9:
+			decoder.fieldDecoder9.Decode(ptr, iter)
+		case decoder.fieldHash10:
+			decoder.fieldDecoder10.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type structFieldDecoder struct {
+	field        *reflect.StructField
+	fieldDecoder ValDecoder
+}
+
+func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset)
+	decoder.fieldDecoder.Decode(fieldPtr, iter)
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error())
+	}
+}
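The two- through ten-field decoders above are hand-unrolled instances of a single pattern: each known field name is reduced to an int32 hash up front, and Decode dispatches on the hash of every incoming field through a plain switch, skipping anything unknown, which keeps the hot path free of string comparisons and map lookups. A minimal standalone sketch of that same dispatch shape; hash/fnv here is only a stand-in for jsoniter's internal readFieldHash, and the types are simplified for illustration:

package main

import (
	"fmt"
	"hash/fnv"
)

// fieldHash plays the role of readFieldHash: reduce a field name to an
// int32 so dispatch becomes a switch on integers, not string compares.
func fieldHash(name string) int32 {
	h := fnv.New32a()
	h.Write([]byte(name))
	return int32(h.Sum32())
}

// twoFieldsDecoder mirrors the shape of the generated decoders above:
// one precomputed hash and one handler per known field.
type twoFieldsDecoder struct {
	hash1, hash2 int32
	dec1, dec2   func(val string)
}

func (d *twoFieldsDecoder) decode(fields map[string]string) {
	for name, val := range fields {
		switch fieldHash(name) {
		case d.hash1:
			d.dec1(val)
		case d.hash2:
			d.dec2(val)
		default:
			// Unknown field: ignored, like iter.Skip() above.
		}
	}
}

func main() {
	var id, name string
	d := &twoFieldsDecoder{
		hash1: fieldHash("id"),
		dec1:  func(v string) { id = v },
		hash2: fieldHash("name"),
		dec2:  func(v string) { name = v },
	}
	d.decode(map[string]string{"id": "42", "name": "crio", "extra": "ignored"})
	fmt.Println(id, name) // 42 crio
}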
diff --git a/vendor/github.com/json-iterator/go/feature_stream.go b/vendor/github.com/json-iterator/go/feature_stream.go
new file mode 100644
index 00000000..9c8470a0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_stream.go
@@ -0,0 +1,305 @@
+package jsoniter
+
+import (
+	"io"
+)
+
+// Stream is a io.Writer like object, with JSON specific write functions.
+// Error is not returned as return value, but stored as Error member on this stream instance.
+type Stream struct {
+	cfg       *frozenConfig
+	out       io.Writer
+	buf       []byte
+	n         int
+	Error     error
+	indention int
+}
+
+// NewStream create new stream instance.
+// cfg can be jsoniter.ConfigDefault.
+// out can be nil if write to internal buffer.
+// bufSize is the initial size for the internal buffer in bytes.
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
+	return &Stream{
+		cfg:       cfg.(*frozenConfig),
+		out:       out,
+		buf:       make([]byte, bufSize),
+		n:         0,
+		Error:     nil,
+		indention: 0,
+	}
+}
+
+// Pool returns a pool can provide more stream with same configuration
+func (stream *Stream) Pool() StreamPool {
+	return stream.cfg
+}
+
+// Reset reuse this stream instance by assign a new writer
+func (stream *Stream) Reset(out io.Writer) {
+	stream.out = out
+	stream.n = 0
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (stream *Stream) Available() int {
+	return len(stream.buf) - stream.n
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (stream *Stream) Buffered() int {
+	return stream.n
+}
+
+// Buffer if writer is nil, use this method to take the result
+func (stream *Stream) Buffer() []byte {
+	return stream.buf[:stream.n]
+}
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (stream *Stream) Write(p []byte) (nn int, err error) {
+	for len(p) > stream.Available() && stream.Error == nil {
+		if stream.out == nil {
+			stream.growAtLeast(len(p))
+		} else {
+			var n int
+			if stream.Buffered() == 0 {
+				// Large write, empty buffer.
+				// Write directly from p to avoid copy.
+				n, stream.Error = stream.out.Write(p)
+			} else {
+				n = copy(stream.buf[stream.n:], p)
+				stream.n += n
+				stream.Flush()
+			}
+			nn += n
+			p = p[n:]
+		}
+	}
+	if stream.Error != nil {
+		return nn, stream.Error
+	}
+	n := copy(stream.buf[stream.n:], p)
+	stream.n += n
+	nn += n
+	return nn, nil
+}
+
+// WriteByte writes a single byte.
+func (stream *Stream) writeByte(c byte) {
+	if stream.Error != nil {
+		return
+	}
+	if stream.Available() < 1 {
+		stream.growAtLeast(1)
+	}
+	stream.buf[stream.n] = c
+	stream.n++
+}
+
+func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
+	if stream.Error != nil {
+		return
+	}
+	if stream.Available() < 2 {
+		stream.growAtLeast(2)
+	}
+	stream.buf[stream.n] = c1
+	stream.buf[stream.n+1] = c2
+	stream.n += 2
+}
+
+func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
+	if stream.Error != nil {
+		return
+	}
+	if stream.Available() < 3 {
+		stream.growAtLeast(3)
+	}
+	stream.buf[stream.n] = c1
+	stream.buf[stream.n+1] = c2
+	stream.buf[stream.n+2] = c3
+	stream.n += 3
+}
+
+func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
+	if stream.Error != nil {
+		return
+	}
+	if stream.Available() < 4 {
+		stream.growAtLeast(4)
+	}
+	stream.buf[stream.n] = c1
+	stream.buf[stream.n+1] = c2
+	stream.buf[stream.n+2] = c3
+	stream.buf[stream.n+3] = c4
+	stream.n += 4
+}
+
+func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
+	if stream.Error != nil {
+		return
+	}
+	if stream.Available() < 5 {
+		stream.growAtLeast(5)
+	}
+	stream.buf[stream.n] = c1
+	stream.buf[stream.n+1] = c2
+	stream.buf[stream.n+2] = c3
+	stream.buf[stream.n+3] = c4
+	stream.buf[stream.n+4] = c5
+	stream.n += 5
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (stream *Stream) Flush() error {
+	if stream.out == nil {
+		return nil
+	}
+	if stream.Error != nil {
+		return stream.Error
+	}
+	if stream.n == 0 {
+		return nil
+	}
+	n, err := stream.out.Write(stream.buf[0:stream.n])
+	if n < stream.n && err == nil {
+		err = io.ErrShortWrite
+	}
+	if err != nil {
+		if n > 0 && n < stream.n {
+			copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n])
+		}
+		stream.n -= n
+		stream.Error = err
+		return err
+	}
+	stream.n = 0
+	return nil
+}
+
+func (stream *Stream) ensure(minimal int) {
+	available := stream.Available()
+	if available < minimal {
+		stream.growAtLeast(minimal)
+	}
+}
+
+func (stream *Stream) growAtLeast(minimal int) {
+	if stream.out != nil {
+		stream.Flush()
+	}
+	toGrow := len(stream.buf)
+	if toGrow < minimal {
+		toGrow = minimal
+	}
+	newBuf := make([]byte, len(stream.buf)+toGrow)
+	copy(newBuf, stream.Buffer())
+	stream.buf = newBuf
+}
+
+// WriteRaw write string out without quotes, just like []byte
+func (stream *Stream) WriteRaw(s string) {
+	stream.ensure(len(s))
+	if stream.Error != nil {
+		return
+	}
+	n := copy(stream.buf[stream.n:], s)
+	stream.n += n
+}
+
+// WriteNil write null to stream
+func (stream *Stream) WriteNil() {
+	stream.writeFourBytes('n', 'u', 'l', 'l')
+}
+
+// WriteTrue write true to stream
+func (stream *Stream) WriteTrue() {
+	stream.writeFourBytes('t', 'r', 'u', 'e')
+}
+
+// WriteFalse write false to stream
+func (stream *Stream) WriteFalse() {
+	stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
+}
+
+// WriteBool write true or false into stream
+func (stream *Stream) WriteBool(val bool) {
+	if val {
+		stream.WriteTrue()
+	} else {
+		stream.WriteFalse()
+	}
+}
+
+// WriteObjectStart write { with possible indention
+func (stream *Stream) WriteObjectStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('{')
+	stream.writeIndention(0)
+}
+
+// WriteObjectField write "field": with possible indention
+func (stream *Stream) WriteObjectField(field string) {
+	stream.WriteString(field)
+	if stream.indention > 0 {
+		stream.writeTwoBytes(':', ' ')
+	} else {
+		stream.writeByte(':')
+	}
+}
+
+// WriteObjectEnd write } with possible indention
+func (stream *Stream) WriteObjectEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte('}')
+}
+
+// WriteEmptyObject write {}
+func (stream *Stream) WriteEmptyObject() {
+	stream.writeByte('{')
+	stream.writeByte('}')
+}
+
+// WriteMore write , with possible indention
+func (stream *Stream) WriteMore() {
+	stream.writeByte(',')
+	stream.writeIndention(0)
+}
+
+// WriteArrayStart write [ with possible indention
+func (stream *Stream) WriteArrayStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('[')
+	stream.writeIndention(0)
+}
+
+// WriteEmptyArray write []
+func (stream *Stream) WriteEmptyArray() {
+	stream.writeByte('[')
+	stream.writeByte(']')
+}
+
+// WriteArrayEnd write ] with possible indention
+func (stream *Stream) WriteArrayEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte(']')
+}
+
+func (stream *Stream) writeIndention(delta int) {
+	if stream.indention == 0 {
+		return
+	}
+	stream.writeByte('\n')
+	toWrite := stream.indention - delta
+	stream.ensure(toWrite)
+	for i := 0; i < toWrite && stream.n < len(stream.buf); i++ {
+		stream.buf[stream.n] = ' '
+		stream.n++
+	}
+}
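Stream, defined above, layers JSON-aware helpers on top of a bufio.Writer-style buffer; per the NewStream comment, passing a nil writer makes the stream accumulate into its internal buffer, which Buffer() then exposes. A hedged usage sketch against the public API introduced in this file (the expected output assumes the default config, whose indention step is zero, so no whitespace is emitted):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// nil writer: output stays in the internal buffer.
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, nil, 64)

	stream.WriteObjectStart()
	stream.WriteObjectField("name")
	stream.WriteString("cri-o")
	stream.WriteMore()
	stream.WriteObjectField("pid")
	stream.WriteInt(1)
	stream.WriteObjectEnd()

	// Errors are sticky on the stream rather than returned per call.
	if stream.Error != nil {
		fmt.Println("write failed:", stream.Error)
		return
	}
	fmt.Println(string(stream.Buffer())) // {"name":"cri-o","pid":1}
}

WriteString and WriteInt are defined later in this same diff, in feature_stream_string.go and feature_stream_int.go.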
diff --git a/vendor/github.com/json-iterator/go/feature_stream_float.go b/vendor/github.com/json-iterator/go/feature_stream_float.go
new file mode 100644
index 00000000..9a404e11
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_stream_float.go
@@ -0,0 +1,96 @@
+package jsoniter
+
+import (
+	"math"
+	"strconv"
+)
+
+var pow10 []uint64
+
+func init() {
+	pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
+}
+
+// WriteFloat32 write float32 to stream
+func (stream *Stream) WriteFloat32(val float32) {
+	abs := math.Abs(float64(val))
+	fmt := byte('f')
+	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+	if abs != 0 {
+		if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32))
+}
+
+// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster
+func (stream *Stream) WriteFloat32Lossy(val float32) {
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat32(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 6
+	lval := uint64(float64(val)*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	stream.ensure(10)
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[stream.n-1] == '0' {
+		stream.n--
+	}
+}
+
+// WriteFloat64 write float64 to stream
+func (stream *Stream) WriteFloat64(val float64) {
+	abs := math.Abs(val)
+	fmt := byte('f')
+	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+	if abs != 0 {
+		if abs < 1e-6 || abs >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64))
+}
+
+// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster
+func (stream *Stream) WriteFloat64Lossy(val float64) {
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat64(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 6
+	lval := uint64(val*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	stream.ensure(10)
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[stream.n-1] == '0' {
+		stream.n--
+	}
+}
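The Lossy variants trade exactness for speed: the value is scaled by 10^6 and rounded, the integer part is written, then the fractional part with its leading zeros restored and trailing zeros trimmed, so at most six fractional digits survive; magnitudes above 0x4ffffff fall back to the exact strconv-based writer. A small sketch of the observable difference (the expected outputs are traced by hand from the code above, so treat them as illustrative rather than authoritative):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, nil, 32)

	stream.WriteFloat64Lossy(3.14159265)
	fmt.Println(string(stream.Buffer())) // 3.141593 (rounded to 6 digits)

	stream.Reset(nil) // reuse the stream, discarding the buffered output
	stream.WriteFloat64(3.14159265)
	fmt.Println(string(stream.Buffer())) // 3.14159265 (shortest exact form)
}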
diff --git a/vendor/github.com/json-iterator/go/feature_stream_int.go b/vendor/github.com/json-iterator/go/feature_stream_int.go
new file mode 100644
index 00000000..7cfd522c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_stream_int.go
@@ -0,0 +1,320 @@
+package jsoniter
+
+var digits []uint32
+
+func init() {
+	digits = make([]uint32, 1000)
+	for i := uint32(0); i < 1000; i++ {
+		digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
+		if i < 10 {
+			digits[i] += 2 << 24
+		} else if i < 100 {
+			digits[i] += 1 << 24
+		}
+	}
+}
+
+func writeFirstBuf(buf []byte, v uint32, n int) int {
+	start := v >> 24
+	if start == 0 {
+		buf[n] = byte(v >> 16)
+		n++
+		buf[n] = byte(v >> 8)
+		n++
+	} else if start == 1 {
+		buf[n] = byte(v >> 8)
+		n++
+	}
+	buf[n] = byte(v)
+	n++
+	return n
+}
+
+func writeBuf(buf []byte, v uint32, n int) {
+	buf[n] = byte(v >> 16)
+	buf[n+1] = byte(v >> 8)
+	buf[n+2] = byte(v)
+}
+
+// WriteUint8 write uint8 to stream
+func (stream *Stream) WriteUint8(val uint8) {
+	stream.ensure(3)
+	stream.n = writeFirstBuf(stream.buf, digits[val], stream.n)
+}
+
+// WriteInt8 write int8 to stream
+func (stream *Stream) WriteInt8(nval int8) {
+	stream.ensure(4)
+	n := stream.n
+	var val uint8
+	if nval < 0 {
+		val = uint8(-nval)
+		stream.buf[n] = '-'
+		n++
+	} else {
+		val = uint8(nval)
+	}
+	stream.n = writeFirstBuf(stream.buf, digits[val], n)
+}
+
+// WriteUint16 write uint16 to stream
+func (stream *Stream) WriteUint16(val uint16) {
+	stream.ensure(5)
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.n = writeFirstBuf(stream.buf, digits[val], stream.n)
+		return
+	}
+	r1 := val - q1*1000
+	n := writeFirstBuf(stream.buf, digits[q1], stream.n)
+	writeBuf(stream.buf, digits[r1], n)
+	stream.n = n + 3
+	return
+}
+
+// WriteInt16 write int16 to stream
+func (stream *Stream) WriteInt16(nval int16) {
+	stream.ensure(6)
+	n := stream.n
+	var val uint16
+	if nval < 0 {
+		val = uint16(-nval)
+		stream.buf[n] = '-'
+		n++
+	} else {
+		val = uint16(nval)
+	}
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.n = writeFirstBuf(stream.buf, digits[val], n)
+		return
+	}
+	r1 := val - q1*1000
+	n = writeFirstBuf(stream.buf, digits[q1], n)
+	writeBuf(stream.buf, digits[r1], n)
+	stream.n = n + 3
+	return
+}
+
+// WriteUint32 write uint32 to stream
+func (stream *Stream) WriteUint32(val uint32) {
+	stream.ensure(10)
+	n := stream.n
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.n = writeFirstBuf(stream.buf, digits[val], n)
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		n := writeFirstBuf(stream.buf, digits[q1], n)
+		writeBuf(stream.buf, digits[r1], n)
+		stream.n = n + 3
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		n = writeFirstBuf(stream.buf, digits[q2], n)
+	} else {
+		r3 := q2 - q3*1000
+		stream.buf[n] = byte(q3 + '0')
+		n++
+		writeBuf(stream.buf, digits[r3], n)
+		n += 3
+	}
+	writeBuf(stream.buf, digits[r2], n)
+	writeBuf(stream.buf, digits[r1], n+3)
+	stream.n = n + 6
+}
+
+// WriteInt32 write int32 to stream
+func (stream *Stream) WriteInt32(nval int32) {
+	stream.ensure(11)
+	n := stream.n
+	var val uint32
+	if nval < 0 {
+		val = uint32(-nval)
+		stream.buf[n] = '-'
+		n++
+	} else {
+		val = uint32(nval)
+	}
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.n = writeFirstBuf(stream.buf, digits[val], n)
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		n := writeFirstBuf(stream.buf, digits[q1], n)
+		writeBuf(stream.buf, digits[r1], n)
+		stream.n = n + 3
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		n = writeFirstBuf(stream.buf, digits[q2], n)
+	} else {
+		r3 := q2 - q3*1000
+		stream.buf[n] = byte(q3 + '0')
+		n++
+		writeBuf(stream.buf, digits[r3], n)
+		n += 3
+	}
+	writeBuf(stream.buf, digits[r2], n)
+	writeBuf(stream.buf, digits[r1], n+3)
+	stream.n = n + 6
+}
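The integer writers avoid per-digit division: the digits table packs each value 0..999 as three ASCII bytes, with the top byte recording how many leading digits to drop (2 for values under 10, 1 for values under 100), so a number is emitted in base-1000 chunks at one table lookup each; writeFirstBuf trims the leading chunk while writeBuf always emits a full triplet. A standalone re-creation of the trick; the helper names here are hypothetical, not part of jsoniter:

package main

import "fmt"

// digitTriples mirrors the digits table above: three ASCII digits in the
// low bytes, plus a "digits to drop" count in byte 3.
var digitTriples [1000]uint32

func init() {
	for i := uint32(0); i < 1000; i++ {
		digitTriples[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
		if i < 10 {
			digitTriples[i] += 2 << 24
		} else if i < 100 {
			digitTriples[i] += 1 << 24
		}
	}
}

// appendUint32 peels the value into base-1000 chunks, then writes the
// leading chunk without leading zeros and every later chunk as a full
// three-digit triple, the same split as writeFirstBuf/writeBuf above.
func appendUint32(buf []byte, val uint32) []byte {
	var chunks []uint32
	for {
		chunks = append(chunks, val%1000)
		val /= 1000
		if val == 0 {
			break
		}
	}
	v := digitTriples[chunks[len(chunks)-1]]
	switch v >> 24 {
	case 0: // three significant digits
		buf = append(buf, byte(v>>16), byte(v>>8), byte(v))
	case 1: // two significant digits
		buf = append(buf, byte(v>>8), byte(v))
	default: // 2: single digit
		buf = append(buf, byte(v))
	}
	for i := len(chunks) - 2; i >= 0; i-- {
		t := digitTriples[chunks[i]]
		buf = append(buf, byte(t>>16), byte(t>>8), byte(t))
	}
	return buf
}

func main() {
	fmt.Println(string(appendUint32(nil, 42)))      // 42
	fmt.Println(string(appendUint32(nil, 1234567))) // 1234567
}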
+// WriteUint64 write uint64 to stream
+func (stream *Stream) WriteUint64(val uint64) {
+	stream.ensure(20)
+	n := stream.n
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.n = writeFirstBuf(stream.buf, digits[val], n)
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		n := writeFirstBuf(stream.buf, digits[q1], n)
+		writeBuf(stream.buf, digits[r1], n)
+		stream.n = n + 3
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		n = writeFirstBuf(stream.buf, digits[q2], n)
+		writeBuf(stream.buf, digits[r2], n)
+		writeBuf(stream.buf, digits[r1], n+3)
+		stream.n = n + 6
+		return
+	}
+	r3 := q2 - q3*1000
+	q4 := q3 / 1000
+	if q4 == 0 {
+		n = writeFirstBuf(stream.buf, digits[q3], n)
+		writeBuf(stream.buf, digits[r3], n)
+		writeBuf(stream.buf, digits[r2], n+3)
+		writeBuf(stream.buf, digits[r1], n+6)
+		stream.n = n + 9
+		return
+	}
+	r4 := q3 - q4*1000
+	q5 := q4 / 1000
+	if q5 == 0 {
+		n = writeFirstBuf(stream.buf, digits[q4], n)
+		writeBuf(stream.buf, digits[r4], n)
+		writeBuf(stream.buf, digits[r3], n+3)
+		writeBuf(stream.buf, digits[r2], n+6)
+		writeBuf(stream.buf, digits[r1], n+9)
+		stream.n = n + 12
+		return
+	}
+	r5 := q4 - q5*1000
+	q6 := q5 / 1000
+	if q6 == 0 {
+		n = writeFirstBuf(stream.buf, digits[q5], n)
+	} else {
+		n = writeFirstBuf(stream.buf, digits[q6], n)
+		r6 := q5 - q6*1000
+		writeBuf(stream.buf, digits[r6], n)
+		n += 3
+	}
+	writeBuf(stream.buf, digits[r5], n)
+	writeBuf(stream.buf, digits[r4], n+3)
+	writeBuf(stream.buf, digits[r3], n+6)
+	writeBuf(stream.buf, digits[r2], n+9)
+	writeBuf(stream.buf, digits[r1], n+12)
+	stream.n = n + 15
+}
+
+// WriteInt64 write int64 to stream
+func (stream *Stream) WriteInt64(nval int64) {
+	stream.ensure(20)
+	n := stream.n
+	var val uint64
+	if nval < 0 {
+		val = uint64(-nval)
+		stream.buf[n] = '-'
+		n++
+	} else {
+		val = uint64(nval)
+	}
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.n = writeFirstBuf(stream.buf, digits[val], n)
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		n := writeFirstBuf(stream.buf, digits[q1], n)
+		writeBuf(stream.buf, digits[r1], n)
+		stream.n = n + 3
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		n = writeFirstBuf(stream.buf, digits[q2], n)
+		writeBuf(stream.buf, digits[r2], n)
+		writeBuf(stream.buf, digits[r1], n+3)
+		stream.n = n + 6
+		return
+	}
+	r3 := q2 - q3*1000
+	q4 := q3 / 1000
+	if q4 == 0 {
+		n = writeFirstBuf(stream.buf, digits[q3], n)
+		writeBuf(stream.buf, digits[r3], n)
+		writeBuf(stream.buf, digits[r2], n+3)
+		writeBuf(stream.buf, digits[r1], n+6)
+		stream.n = n + 9
+		return
+	}
+	r4 := q3 - q4*1000
+	q5 := q4 / 1000
+	if q5 == 0 {
+		n = writeFirstBuf(stream.buf, digits[q4], n)
+		writeBuf(stream.buf, digits[r4], n)
+		writeBuf(stream.buf, digits[r3], n+3)
+		writeBuf(stream.buf, digits[r2], n+6)
+		writeBuf(stream.buf, digits[r1], n+9)
+		stream.n = n + 12
+		return
+	}
+	r5 := q4 - q5*1000
+	q6 := q5 / 1000
+	if q6 == 0 {
+		n = writeFirstBuf(stream.buf, digits[q5], n)
+	} else {
+		stream.buf[n] = byte(q6 + '0')
+		n++
+		r6 := q5 - q6*1000
+		writeBuf(stream.buf, digits[r6], n)
+		n += 3
+	}
+	writeBuf(stream.buf, digits[r5], n)
+	writeBuf(stream.buf, digits[r4], n+3)
+	writeBuf(stream.buf, digits[r3], n+6)
+	writeBuf(stream.buf, digits[r2], n+9)
+	writeBuf(stream.buf, digits[r1], n+12)
+	stream.n = n + 15
+}
+
+// WriteInt write int to stream
+func (stream *Stream) WriteInt(val int) {
+	stream.WriteInt64(int64(val))
+}
+
+// WriteUint write uint to stream
+func (stream *Stream) WriteUint(val uint) {
+	stream.WriteUint64(uint64(val))
+}
diff --git a/vendor/github.com/json-iterator/go/feature_stream_string.go b/vendor/github.com/json-iterator/go/feature_stream_string.go
new file mode 100644
index 00000000..334282f0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_stream_string.go
@@ -0,0 +1,396 @@
+package jsoniter
+
+import (
+	"unicode/utf8"
+)
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML