Compare commits


No commits in common. "master" and "v0.3" have entirely different histories.
master...v0.3

18,354 changed files with 4,713,323 additions and 460,449 deletions

.github/CODEOWNERS (7 changed lines)

@@ -1,7 +0,0 @@
# GitHub code owners
# See https://help.github.com/articles/about-codeowners/
#
# KEEP THIS FILE SORTED. Order is important. Last match takes precedence.
* @mrunalp @runcom
pkg/storage/** @nalind @runcom @rhatdan

.github/ISSUE_TEMPLATE.md

@@ -1,58 +0,0 @@
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.
If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.
For more information about reporting issues, see
https://github.com/kubernetes-incubator/cri-o/blob/master/CONTRIBUTING.md#reporting-issues
---------------------------------------------------
GENERAL SUPPORT INFORMATION
---------------------------------------------------
The GitHub issue tracker is for bug reports and feature requests.
General support for **CRI-O** can be found at the following locations:
- IRC - #cri-o channel on irc.freenode.org
- Slack - kubernetes.slack.com #sig-node channel
- Post a question on StackOverflow, using the CRI-O tag
---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->
**Description**
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
**Steps to reproduce the issue:**
1.
2.
3.
**Describe the results you received:**
**Describe the results you expected:**
**Additional information you deem important (e.g. issue happens only occasionally):**
**Output of `crio --version`:**
```
(paste your output here)
```
**Additional environment details (AWS, VirtualBox, physical, etc.):**

.github/PULL_REQUEST_TEMPLATE.md

@@ -1,23 +0,0 @@
<!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/kubernetes-incubator/cri-o/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **
If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"
Please provide the following information:
-->
**- What I did**
**- How I did it**
**- How to verify it**
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->

.gitignore (15 changed lines)

@@ -1,18 +1,19 @@
/.artifacts/
/_output/
/conmon/conmon
/conmon/conmon.o
/docs/*.[158]
/docs/*.[158].gz
/crio.conf
/kpod
/ocic
/ocid
/ocid.conf
*.o
*.orig
/pause/pause
/pause/pause.o
/bin/
*.rej
/test/bin2img/bin2img
/test/checkseccomp/checkseccomp
/test/copyimg/copyimg
Vagrantfile
.vagrant/
.vscode/
/test/testdata/redis-image

.mailmap

@@ -1,10 +0,0 @@
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
Antonio Murdaca <runcom@redhat.com> <runcom@users.noreply.github.com>
CuiHaozhi <cuihaozhi@chinacloud.com.cn> <cuihz@wise2c.com>
Daniel J Walsh <dwalsh@redhat.com>
Haiyan Meng <hmeng@redhat.com> <haiyanalady@gmail.com>
Lorenzo Fontana <lo@linux.com> <fontanalorenz@gmail.com>
Mrunal Patel <mrunalp@gmail.com> <mpatel@redhat.com>
Mrunal Patel <mrunalp@gmail.com> <mrunal@me.com>
Pengfei Ni <feiskyer@gmail.com> <feiskyer@users.noreply.github.com>
Tobias Klauser <tklauser@distanz.ch> <tobias.klauser@gmail.com>

.tool/lint

@@ -4,41 +4,20 @@ set -o errexit
set -o nounset
set -o pipefail
# Create the linter path for use later
LINTER=${GOPATH}/bin/gometalinter
# Make sure gometalinter is installed
if [ ! -f ${LINTER} ]; then
echo >&2 "gometalinter must be installed. Please run 'make install.tools' and try again"
exit 1
fi
PKGS=$(find . -type d -not -path . -a -not -iwholename '*.git*' -a -not -iname '.tool' -a -not -iwholename '*vendor*' -a -not -iname 'hack' -a -not -iwholename '*.artifacts*' -a -not -iwholename '*contrib*' -a -not -iwholename '*test*' -a -not -iwholename '*logo*' -a -not -iwholename '*conmon*' -a -not -iwholename '*completions*' -a -not -iwholename '*docs*' -a -not -iwholename '*pause*')
# Execute the linter
${LINTER} \
--concurrency=4\
--enable-gc\
--vendored-linters\
--deadline=600s --disable-all\
--enable=deadcode\
--enable=errcheck\
--enable=goconst\
--enable=gofmt\
--enable=golint\
--enable=ineffassign\
--enable=interfacer\
--enable=megacheck\
--enable=misspell\
--enable=structcheck\
--enable=varcheck\
--enable=vet\
--enable=vetshadow\
--exclude='error return value not checked.*\(errcheck\)$'\
--exclude='declaration of.*err.*shadows declaration.*\(vetshadow\)$'\
--exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$'\
--exclude='duplicate of.*_test.go.*\(dupl\)$'\
--exclude='cmd\/client\/.*\.go.*\(dupl\)$'\
--exclude='vendor\/.*'\
--exclude='server\/seccomp\/.*\.go.*$'\
${PKGS[@]}
for d in $(find . -type d -not -iwholename '*.git*' -a -not -iname '.tool' -a -not -iwholename '*vendor*' -a -not -iwholename '*.artifacts*' -a -not -iwholename '*contrib*' -a -not -iwholename '*test*' -a -not -iwholename '*logo*' -a -not -iwholename '*conmon*' -a -not -iwholename '*completions*' -a -not -iwholename '*docs*' -a -not -iwholename '*pause*'); do
${GOPATH}/bin/gometalinter \
--exclude='error return value not checked.*(Close|Log|Print|RemoveAll).*\(errcheck\)$' \
--exclude='declaration of.*err.*shadows declaration.*\(vetshadow\)$' \
--exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$' \
--exclude='duplicate of.*_test.go.*\(dupl\)$' \
--exclude='cmd\/client\/.*\.go.*\(dupl\)$' \
--exclude='vendor\/.*' \
--exclude='server\/seccomp\/.*\.go.*$' \
--disable=aligncheck \
--disable=gotype \
--disable=gas \
--cyclo-over=80 \
--dupl-threshold=100 \
--tests \
--deadline=120s "${d}"
done
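For context, a minimal sketch of how this lint script is normally invoked, using the `install.tools` and `lint` targets from the Makefile later in this diff:

```bash
# Install gometalinter and the other helper tools, then run the lint target,
# which executes ./.tool/lint over the non-vendored packages.
make install.tools
make lint
```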

.travis.yml

@@ -1,4 +1,9 @@
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- tip
sudo: required
@@ -8,53 +13,20 @@ services:
before_install:
- sudo apt-get -qq update
- sudo apt-get -qq install btrfs-tools libdevmapper-dev libgpgme11-dev libapparmor-dev libseccomp-dev
- sudo apt-get -qq install autoconf automake bison e2fslibs-dev libfuse-dev libtool liblzma-dev gettext
install:
- make install.tools
- OSTREE_VERSION=v2017.9
- git clone https://github.com/ostreedev/ostree ${TRAVIS_BUILD_DIR}/ostree
- pushd ${TRAVIS_BUILD_DIR}/ostree
- git checkout $OSTREE_VERSION
- ./autogen.sh --prefix=/usr/local
- make all
- sudo make install
- popd
before_script:
- export PATH=$HOME/gopath/bin:$PATH
- export LD_LIBRARY_PATH=/usr/local/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
jobs:
include:
- stage: Build and Verify
script:
- make .gitvalidation
- make gofmt
- make lint
- make testunit
- make docs
- make
go: 1.8.x
- stage: Build and Verify
script:
- make .gitvalidation
- make gofmt
- make lint
- make testunit
- make docs
- make
go: 1.9.x
- script:
- make .gitvalidation
- make testunit
- make docs
- make
go: tip
- stage: Integration Test
script:
- make integration
go: 1.8.x
script:
- make .gitvalidation
- make gofmt
- make lint
- make integration
- make docs
- make
notifications:
irc: "chat.freenode.net#cri-o"
irc: "chat.freenode.net#ocid"

CONTRIBUTING.md

@@ -1,142 +0,0 @@
# Contributing to CRI-O
We'd love to have you join the community! Below summarizes the processes
that we follow.
## Topics
* [Reporting Issues](#reporting-issues)
* [Submitting Pull Requests](#submitting-pull-requests)
* [Communications](#communications)
* [Becoming a Maintainer](#becoming-a-maintainer)
## Reporting Issues
Before reporting an issue, check our backlog of
[open issues](https://github.com/kubernetes-incubator/cri-o/issues)
to see if someone else has already reported it. If so, feel free to add
your scenario, or additional information, to the discussion. Or simply
"subscribe" to it to be notified when it is updated.
If you find a new issue with the project we'd love to hear about it! The most
important aspect of a bug report is that it includes enough information for
us to reproduce it. So, please include as much detail as possible and try
to remove the extra stuff that doesn't really relate to the issue itself.
The easier it is for us to reproduce it, the faster it'll be fixed!
Please don't include any private/sensitive information in your issue!
## Submitting Pull Requests
No Pull Request (PR) is too small! Typos, additional comments in the code,
new testcases, bug fixes, new features, more documentation, ... it's all
welcome!
While bug fixes can first be identified via an "issue", that is not required.
It's ok to just open up a PR with the fix, but make sure you include the same
information you would have included in an issue - like how to reproduce it.
PRs for new features should include some background on what use cases the
new code is trying to address. When possible and when it makes sense, try to break-up
larger PRs into smaller ones - it's easier to review smaller
code changes. But only if those smaller ones make sense as stand-alone PRs.
Regardless of the type of PR, all PRs should include:
* well documented code changes
* additional testcases. Ideally, they should fail w/o your code change applied
* documentation changes
Squash your commits into logical pieces of work that might want to be reviewed
separate from the rest of the PRs. But, squashing down to just one commit is ok
too since in the end the entire PR will be reviewed anyway. When in doubt,
squash.
PRs that fix issues should include a reference like `Closes #XXXX` in the
commit message so that github will automatically close the referenced issue
when the PR is merged.
<!--
All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
-->
### Sign your PRs
The sign-off is a line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
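As an illustrative sketch of the sign-off workflow described above (the name and email are the placeholder values from the example line):

```bash
# Configure the identity git will use for the Signed-off-by line (placeholder values).
git config user.name "Joe Smith"
git config user.email "joe.smith@email.com"

# -s appends "Signed-off-by: Joe Smith <joe.smith@email.com>" to the commit message.
git commit -s -m "Fix typo in README"
```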
## Communications
For general questions, or discussions, please use the
IRC group on `irc.freenode.net` called `cri-o`
that has been setup.
For discussions around issues/bugs and features, you can use the github
[issues](https://github.com/kubernetes-incubator/cri-o/issues)
and
[PRs](https://github.com/kubernetes-incubator/cri-o/pulls)
tracking system.
<!--
## Becoming a Maintainer
To become a maintainer you must first be nominated by an existing maintainer.
If a majority (>50%) of maintainers agree then the proposal is adopted and
you will be added to the list.
Removing a maintainer requires at least 75% of the remaining maintainers
approval, or if the person requests to be removed then it is automatic.
Normally, a maintainer will only be removed if they are considered to be
inactive for a long period of time or are viewed as disruptive to the community.
The current list of maintainers can be found in the
[MAINTAINERS](MAINTAINERS) file.
-->

Dockerfile

@@ -1,30 +1,20 @@
FROM golang:1.8
FROM golang:1.7
# libseccomp in jessie is not _quite_ new enough -- need backports version
RUN echo 'deb http://httpredir.debian.org/debian jessie-backports main' > /etc/apt/sources.list.d/backports.list
RUN apt-get update && apt-get install -y \
apparmor \
autoconf \
automake \
bison \
build-essential \
curl \
e2fslibs-dev \
gawk \
gettext \
iptables \
pkg-config \
libaio-dev \
libcap-dev \
libfuse-dev \
libostree-dev \
libprotobuf-dev \
libprotobuf-c0-dev \
libseccomp2/jessie-backports \
libseccomp-dev/jessie-backports \
libtool \
libudev-dev \
protobuf-c-compiler \
protobuf-compiler \
python-minimal \
@@ -34,11 +24,7 @@ RUN apt-get update && apt-get install -y \
libdevmapper1.02.1 \
libdevmapper-dev \
libgpgme11-dev \
liblzma-dev \
netcat \
socat \
--no-install-recommends \
bsdmainutils \
&& apt-get clean
# install bats
@@ -57,7 +43,7 @@ RUN mkdir -p /usr/src/criu \
&& rm -rf /usr/src/criu
# Install runc
ENV RUNC_COMMIT c6e4a1ebeb1a72b529c6f1b6ee2b1ae5b868b14f
ENV RUNC_COMMIT v1.0.0-rc3
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@@ -65,54 +51,29 @@ RUN set -x \
&& git fetch origin --tags \
&& git checkout -q "$RUNC_COMMIT" \
&& make static BUILDTAGS="seccomp selinux" \
&& cp runc /usr/bin/runc \
&& cp runc /usr/local/bin/runc \
&& rm -rf "$GOPATH"
# Install CNI plugins
ENV CNI_COMMIT dcf7368eeab15e2affc6256f0bb1e84dd46a34de
ENV CNI_COMMIT d4bbce1865270cd2d2be558d6a23e63d314fe769
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/containernetworking/plugins.git "$GOPATH/src/github.com/containernetworking/plugins" \
&& cd "$GOPATH/src/github.com/containernetworking/plugins" \
&& git clone https://github.com/containernetworking/cni.git "$GOPATH/src/github.com/containernetworking/cni" \
&& cd "$GOPATH/src/github.com/containernetworking/cni" \
&& git checkout -q "$CNI_COMMIT" \
&& ./build.sh \
&& mkdir -p /opt/cni/bin \
&& cp bin/* /opt/cni/bin/ \
&& rm -rf "$GOPATH"
# Install custom CNI bridge test plugin
# XXX: this plugin is meant to be a replacement for the old "test_plugin_args.bash"
# we need this in testing because sandbox_run now gathers the IP address and the mock
# plugin wasn't able to properly set up the net ns.
# The bridge is based on the same commit as the one above.
#ENV CNI_COMMIT 6bfe036c38c8e1410f1acaa4b2ee16f1851472e4
ENV CNI_TEST_BRANCH custom-bridge
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/runcom/plugins.git "$GOPATH/src/github.com/containernetworking/plugins" \
&& cd "$GOPATH/src/github.com/containernetworking/plugins" \
&& git checkout -q "$CNI_TEST_BRANCH" \
&& ./build.sh \
&& mkdir -p /opt/cni/bin \
&& cp bin/bridge /opt/cni/bin/bridge-custom \
&& rm -rf "$GOPATH"
# Install crictl
ENV CRICTL_COMMIT b42fc3f364dd48f649d55926c34492beeb9b2e99
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/kubernetes-incubator/cri-tools.git "$GOPATH/src/github.com/kubernetes-incubator/cri-tools" \
&& cd "$GOPATH/src/github.com/kubernetes-incubator/cri-tools" \
&& git checkout -q "$CRICTL_COMMIT" \
&& go install github.com/kubernetes-incubator/cri-tools/cmd/crictl \
&& cp "$GOPATH"/bin/crictl /usr/bin/ \
&& rm -rf "$GOPATH"
# Make sure we have some policy for pulling images
RUN mkdir -p /etc/containers
COPY test/policy.json /etc/containers/policy.json
COPY test/redhat_sigstore.yaml /etc/containers/registries.d/registry.access.redhat.com.yaml
WORKDIR /go/src/github.com/kubernetes-incubator/cri-o
ADD . /go/src/github.com/kubernetes-incubator/cri-o
RUN make copyimg \
&& mkdir -p .artifacts/redis-image \
&& ./test/copyimg/copyimg --import-from=docker://redis --export-to=dir:.artifacts/redis-image --signature-policy ./test/policy.json

Makefile (154 changed lines)

@@ -1,28 +1,18 @@
GO ?= go
EPOCH_TEST_COMMIT ?= 1cc5a27
EPOCH_TEST_COMMIT ?= 78aae688e2932f0cfc2a23e28ad30b58c6b8577f
PROJECT := github.com/kubernetes-incubator/cri-o
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
CRIO_IMAGE := crio_dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
CRIO_INSTANCE := crio_dev
OCID_IMAGE := ocid_dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
OCID_INSTANCE := ocid_dev
PREFIX ?= ${DESTDIR}/usr/local
BINDIR ?= ${PREFIX}/bin
LIBEXECDIR ?= ${PREFIX}/libexec
MANDIR ?= ${PREFIX}/share/man
ETCDIR ?= ${DESTDIR}/etc
ETCDIR_CRIO ?= ${ETCDIR}/crio
BUILDTAGS ?= seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_installed.sh) $(shell hack/libdm_no_deferred_remove_tag.sh) $(shell hack/btrfs_installed_tag.sh) $(shell hack/ostree_tag.sh) $(shell hack/selinux_tag.sh)
CRICTL_CONFIG_DIR=${DESTDIR}/etc
ETCDIR_OCID ?= ${ETCDIR}/ocid
BUILDTAGS := selinux seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_tag.sh)
BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
OCIUMOUNTINSTALLDIR=$(PREFIX)/share/oci-umount/oci-umount.d
SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z)
PACKAGES ?= $(shell go list -tags "${BUILDTAGS}" ./... | grep -v github.com/kubernetes-incubator/cri-o/vendor)
COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true)
GIT_COMMIT := $(if $(shell git status --porcelain --untracked-files=no),"${COMMIT_NO}-dirty","${COMMIT_NO}")
BUILD_INFO := $(shell date +%s)
# If GOPATH not specified, use one in the local directory
ifeq ($(GOPATH),)
@@ -34,11 +24,8 @@ GOPKGBASEDIR := $(shell dirname "$(GOPKGDIR)")
# Update VPATH so make finds .gopathok
VPATH := $(VPATH):$(GOPATH)
SHRINKFLAGS := -s -w
BASE_LDFLAGS := ${SHRINKFLAGS} -X main.gitCommit=${GIT_COMMIT} -X main.buildInfo=${BUILD_INFO}
LDFLAGS := -ldflags '${BASE_LDFLAGS}'
all: binaries crio.conf docs
all: binaries ocid.conf docs
default: help
@@ -46,7 +33,7 @@ help:
@echo "Usage: make <target>"
@echo
@echo " * 'install' - Install binaries to system locations"
@echo " * 'binaries' - Build crio, conmon and pause"
@echo " * 'binaries' - Build ocid, conmon and ocic"
@echo " * 'integration' - Execute integration tests"
@echo " * 'clean' - Clean artifacts"
@echo " * 'lint' - Execute the source code linter"
@@ -64,8 +51,7 @@ lint: .gopathok
@./.tool/lint
gofmt:
find . -name '*.go' ! -path './vendor/*' -exec gofmt -s -w {} \+
git diff --exit-code
@./hack/verify-gofmt.sh
conmon:
$(MAKE) -C $@
@@ -73,97 +59,103 @@ conmon:
pause:
$(MAKE) -C $@
test/bin2img/bin2img: .gopathok $(wildcard test/bin2img/*.go)
$(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/bin2img
bin2img: .gopathok $(wildcard test/bin2img/*.go)
go build -tags "$(BUILDTAGS)" -o test/bin2img/$@ $(PROJECT)/test/bin2img
test/copyimg/copyimg: .gopathok $(wildcard test/copyimg/*.go)
$(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/copyimg
copyimg: .gopathok $(wildcard test/copyimg/*.go)
go build -tags "$(BUILDTAGS)" -o test/copyimg/$@ $(PROJECT)/test/copyimg
test/checkseccomp/checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go)
$(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/checkseccomp
checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go)
go build -o test/checkseccomp/$@ $(PROJECT)/test/checkseccomp
crio: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/crio $(PROJECT))
$(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o bin/$@ $(PROJECT)/cmd/crio
ocid: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/ocid $(PROJECT))
$(GO) build -o $@ \
-tags "$(BUILDTAGS)" \
$(PROJECT)/cmd/ocid
crio.conf: crio
./bin/crio --config="" config --default > crio.conf
ocic: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/ocic $(PROJECT))
$(GO) build -o $@ $(PROJECT)/cmd/ocic
kpod: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/kpod $(PROJECT))
$(GO) build -o $@ $(PROJECT)/cmd/kpod
ocid.conf: ocid
./ocid --config="" config --default > ocid.conf
clean:
ifneq ($(GOPATH),)
rm -f "$(GOPATH)/.gopathok"
endif
rm -rf _output
rm -f docs/*.5 docs/*.8
rm -f docs/*.1 docs/*.5 docs/*.8
rm -fr test/testdata/redis-image
find . -name \*~ -delete
find . -name \#\* -delete
rm -f bin/crio
rm -f ocic ocid kpod
make -C conmon clean
make -C pause clean
rm -f test/bin2img/bin2img
rm -f test/copyimg/copyimg
rm -f test/checkseccomp/checkseccomp
crioimage:
docker build -t ${CRIO_IMAGE} .
ocidimage:
docker build -t ${OCID_IMAGE} .
dbuild: crioimage
docker run --name=${CRIO_INSTANCE} -e BUILDTAGS --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${CRIO_IMAGE} make binaries
dbuild: ocidimage
docker run --name=${OCID_INSTANCE} --privileged ${OCID_IMAGE} -v ${PWD}:/go/src/${PROJECT} --rm make binaries
integration: crioimage
docker run -e STORAGE_OPTIONS="--storage-driver=vfs" -e TESTFLAGS -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${CRIO_IMAGE} make localintegration
integration: ocidimage
docker run -e TESTFLAGS -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${OCID_IMAGE} make localintegration
testunit:
$(GO) test -tags "$(BUILDTAGS)" -cover $(PACKAGES)
localintegration: clean binaries test-binaries
localintegration: binaries
./test/test_runner.sh ${TESTFLAGS}
binaries: crio conmon pause
test-binaries: test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp
binaries: ocid ocic kpod conmon pause bin2img copyimg checkseccomp
MANPAGES_MD := $(wildcard docs/*.md)
MANPAGES := $(MANPAGES_MD:%.md=%)
docs/%.1: docs/%.1.md .gopathok
$(GOPATH)/bin/go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@
docs/%.5: docs/%.5.md .gopathok
(go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@) || ($(GOPATH)/bin/go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@)
$(GOPATH)/bin/go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@
docs/%.8: docs/%.8.md .gopathok
(go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@) || ($(GOPATH)/bin/go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@)
$(GOPATH)/bin/go-md2man -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@
docs: $(MANPAGES)
install: .gopathok install.bin install.man
install: .gopathok
install -D -m 755 ocid $(BINDIR)/ocid
install -D -m 755 ocic $(BINDIR)/ocic
install -D -m 755 kpod $(BINDIR)/kpod
install -D -m 755 conmon/conmon $(LIBEXECDIR)/ocid/conmon
install -D -m 755 pause/pause $(LIBEXECDIR)/ocid/pause
install -d -m 755 $(MANDIR)/man1
install -d -m 755 $(MANDIR)/man5
install -d -m 755 $(MANDIR)/man8
install -m 644 $(filter %.1,$(MANPAGES)) -t $(MANDIR)/man1
install -m 644 $(filter %.5,$(MANPAGES)) -t $(MANDIR)/man5
install -m 644 $(filter %.8,$(MANPAGES)) -t $(MANDIR)/man8
install.bin:
install ${SELINUXOPT} -D -m 755 bin/crio $(BINDIR)/crio
install ${SELINUXOPT} -D -m 755 bin/conmon $(LIBEXECDIR)/crio/conmon
install ${SELINUXOPT} -D -m 755 bin/pause $(LIBEXECDIR)/crio/pause
install.man:
install ${SELINUXOPT} -d -m 755 $(MANDIR)/man5
install ${SELINUXOPT} -d -m 755 $(MANDIR)/man8
install ${SELINUXOPT} -m 644 $(filter %.5,$(MANPAGES)) -t $(MANDIR)/man5
install ${SELINUXOPT} -m 644 $(filter %.8,$(MANPAGES)) -t $(MANDIR)/man8
install.config: crio.conf
install ${SELINUXOPT} -D -m 644 crio.conf $(ETCDIR_CRIO)/crio.conf
install ${SELINUXOPT} -D -m 644 seccomp.json $(ETCDIR_CRIO)/seccomp.json
install ${SELINUXOPT} -D -m 644 crio-umount.conf $(OCIUMOUNTINSTALLDIR)/crio-umount.conf
install ${SELINUXOPT} -D -m 644 crictl.yaml $(CRICTL_CONFIG_DIR)
install.config:
install -D -m 644 ocid.conf $(ETCDIR_OCID)/ocid.conf
install -D -m 644 seccomp.json $(ETCDIR_OCID)/seccomp.json
install.completions:
install ${SELINUXOPT} -d -m 755 ${BASHINSTALLDIR}
install -d -m 755 ${BASHINSTALLDIR}
install -m 644 -D completions/bash/kpod ${BASHINSTALLDIR}
install.systemd:
install ${SELINUXOPT} -D -m 644 contrib/systemd/crio.service $(PREFIX)/lib/systemd/system/crio.service
ln -sf crio.service $(PREFIX)/lib/systemd/system/cri-o.service
install ${SELINUXOPT} -D -m 644 contrib/systemd/crio-shutdown.service $(PREFIX)/lib/systemd/system/crio-shutdown.service
install -D -m 644 contrib/systemd/ocid.service $(PREFIX)/lib/systemd/system/ocid.service
install -D -m 644 contrib/systemd/ocid-shutdown.service $(PREFIX)/lib/systemd/system/ocid-shutdown.service
uninstall:
rm -f $(BINDIR)/crio
rm -f $(LIBEXECDIR)/crio/conmon
rm -f $(LIBEXECDIR)/crio/pause
rm -f $(BINDIR)/ocid
rm -f $(BINDIR)/ocic
rm -f $(LIBEXECDIR)/ocid/conmon
rm -f $(LIBEXECDIR)/ocid/pause
for i in $(filter %.1,$(MANPAGES)); do \
rm -f $(MANDIR)/man8/$$(basename $${i}); \
done
@@ -178,9 +170,9 @@ uninstall:
# When this is running in travis, it will only check the travis commit range
.gitvalidation: .gopathok
ifeq ($(TRAVIS),true)
GIT_CHECK_EXCLUDE="./vendor" $(GOPATH)/bin/git-validation -q -run DCO,short-subject,dangling-whitespace
$(GOPATH)/bin/git-validation -q -run DCO,short-subject
else
GIT_CHECK_EXCLUDE="./vendor" $(GOPATH)/bin/git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD
$(GOPATH)/bin/git-validation -v -run DCO,short-subject -range $(EPOCH_TEST_COMMIT)..HEAD
endif
.PHONY: install.tools
@@ -195,9 +187,6 @@ install.tools: .install.gitvalidation .install.gometalinter .install.md2man
.install.gometalinter: .gopathok
if [ ! -x "$(GOPATH)/bin/gometalinter" ]; then \
go get -u github.com/alecthomas/gometalinter; \
cd $(GOPATH)/src/github.com/alecthomas/gometalinter; \
git checkout 23261fa046586808612c61da7a81d75a658e0814; \
go install github.com/alecthomas/gometalinter; \
$(GOPATH)/bin/gometalinter --install; \
fi
@@ -206,18 +195,13 @@ install.tools: .install.gitvalidation .install.gometalinter .install.md2man
go get -u github.com/cpuguy83/go-md2man; \
fi
.install.ostree: .gopathok
if ! pkg-config ostree-1 2> /dev/null ; then \
git clone https://github.com/ostreedev/ostree $(GOPATH)/src/github.com/ostreedev/ostree ; \
cd $(GOPATH)/src/github.com/ostreedev/ostree ; \
./autogen.sh --prefix=/usr/local; \
make all install; \
fi
.PHONY: \
bin2img \
binaries \
checkseccomp \
clean \
conmon \
copyimg \
default \
docs \
gofmt \

OWNERS (4 changed lines)

@@ -1,9 +1,7 @@
approvers:
assignees:
- mrunalp
- runcom
- cyphar
- mikebrow
- feiskyer
- sameo
- rhatdan
- nalind

README.md (123 changed lines)

@@ -1,32 +1,18 @@
![CRI-O logo](https://cdn.rawgit.com/kubernetes-incubator/cri-o/master/logo/crio-logo.svg)
# CRI-O - OCI-based implementation of Kubernetes Container Runtime Interface
![cri-o logo](https://cdn.rawgit.com/kubernetes-incubator/cri-o/master/logo/crio-logo.svg)
# cri-o - OCI-based implementation of Kubernetes Container Runtime Interface
[![Build Status](https://img.shields.io/travis/kubernetes-incubator/cri-o.svg?maxAge=2592000&style=flat-square)](https://travis-ci.org/kubernetes-incubator/cri-o)
[![Go Report Card](https://goreportcard.com/badge/github.com/kubernetes-incubator/cri-o?style=flat-square)](https://goreportcard.com/report/github.com/kubernetes-incubator/cri-o)
### Status: Stable
## Compatibility matrix: CRI-O <-> Kubernetes clusters
| Version - Branch | Kubernetes branch/version | Maintenance status |
|----------------------------|-------------------------------|--------------------|
| CRI-O 1.0.x - release-1.0 | Kubernetes 1.7 branch, v1.7.x | = |
| CRI-O 1.8.x - release-1.8 | Kubernetes 1.8 branch, v1.8.x | = |
| CRI-O 1.9.x - release-1.9 | Kubernetes 1.9 branch, v1.9.x | = |
| CRI-O HEAD - master | Kubernetes master branch | ✓ |
Key:
* `✓` Changes in main Kubernetes repo about CRI are actively implemented in CRI-O
* `=` Maintenance is manual, only bugs will be patched.
### Status: pre-alpha
## What is the scope of this project?
CRI-O is meant to provide an integration path between OCI conformant runtimes and the kubelet.
Specifically, it implements the Kubelet [Container Runtime Interface (CRI)](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md) using OCI conformant runtimes.
The scope of CRI-O is tied to the scope of the CRI.
cri-o is meant to provide an integration path between OCI conformant runtimes and the kubelet.
Specifically, it implements the Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes.
The scope of cri-o is tied to the scope of the CRI.
At a high level, we expect the scope of CRI-O to be restricted to the following functionalities:
At a high level, we expect the scope of cri-o to be restricted to the following functionalities:
* Support multiple image formats including the existing Docker image format
* Support for multiple means to download images including trust & image verification
@@ -38,7 +24,7 @@ At a high level, we expect the scope of CRI-O to be restricted to the following
## What is not in scope for this project?
* Building, signing and pushing images to various image storages
* A CLI utility for interacting with CRI-O. Any CLIs built as part of this project are only meant for testing this project and there will be no guarantees on the backward compatibility with it.
* A CLI utility for interacting with cri-o. Any CLIs built as part of this project are only meant for testing this project and there will be no guarantees on the backwards compatibility with it.
This is an implementation of the Kubernetes Container Runtime Interface (CRI) that will allow Kubernetes to directly launch and manage Open Container Initiative (OCI) containers.
@@ -50,44 +36,13 @@ The plan is to use OCI projects and best of breed libraries for different aspect
It is currently in active development in the Kubernetes community through the [design proposal](https://github.com/kubernetes/kubernetes/pull/26788). Questions and issues should be raised in the Kubernetes [sig-node Slack channel](https://kubernetes.slack.com/archives/sig-node).
## Commands
| Command | Description | Demo|
| ---------------------------------------------------- | --------------------------------------------------------------------------|-----|
| [crio(8)](/docs/crio.8.md) | OCI Kubernetes Container Runtime daemon ||
Note that kpod and its container management and debugging commands have moved to a separate repository, located [here](https://github.com/projectatomic/libpod).
## Configuration
| File | Description |
| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
| [crio.conf(5)](/docs/crio.conf.5.md) | CRI-O Configuration file |
## OCI Hooks Support
[CRI-O configures OCI Hooks to run when launching a container](./hooks.md)
## CRI-O Usage Transfer
[Useful information for ops and dev transfer as it relates to infrastructure that utilizes CRI-O](/transfer.md)
## Communication
For async communication and long running discussions please use issues and pull requests on the github repo. This will be the best place to discuss design and implementation.
For sync communication we have an IRC channel #CRI-O, on chat.freenode.net, that everyone is welcome to join and chat about development.
## Getting started
### Runtime dependencies
### Prerequisites
- runc, Clear Containers runtime, or any other OCI compatible runtime
- socat
- iproute
- iptables
`runc` version 1.0.0.rc1 or greater is expected to be installed on the system. It is picked up as the default runtime by ocid.
Latest version of `runc` is expected to be installed on the system. It is picked up as the default runtime by CRI-O.
### Build and Run Dependencies
### Build Dependencies
**Required**
@@ -97,30 +52,23 @@ Fedora, CentOS, RHEL, and related distributions:
yum install -y \
btrfs-progs-devel \
device-mapper-devel \
git \
glib2-devel \
glibc-devel \
glibc-static \
go \
golang-github-cpuguy83-go-md2man \
gpgme-devel \
libassuan-devel \
libgpg-error-devel \
libseccomp-devel \
libselinux-devel \
ostree-devel \
pkgconfig \
runc \
skopeo-containers
runc
```
Debian, Ubuntu, and related distributions:
```bash
apt-get install -y \
apt install -y \
btrfs-tools \
git \
golang-go \
libassuan-dev \
libdevmapper-dev \
libglib2.0-dev \
@@ -130,18 +78,10 @@ apt-get install -y \
libseccomp-dev \
libselinux1-dev \
pkg-config \
go-md2man \
runc \
skopeo-containers
runc
```
Debian, Ubuntu, and related distributions will also need a copy of the development libraries for `ostree`, either in the form of the `libostree-dev` package from the [flatpak](https://launchpad.net/~alexlarsson/+archive/ubuntu/flatpak) PPA, or built [from source](https://github.com/ostreedev/ostree) (more on that [here](https://ostree.readthedocs.io/en/latest/#building)).
If using an older release or a long-term support release, be careful to double-check that the version of `runc` is new enough (running `runc --version` should produce `spec: 1.0.0`), or else build your own.
**NOTE**
Be careful to double-check that the version of golang is new enough; version 1.8.x or higher is required. If needed, golang kits are available at https://golang.org/dl/
If using an older release or a long-term support release, be careful to double-check that the version of `runc` is new enough, or else build your own.
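As an illustrative sketch of the `libostree-dev` route mentioned above (the PPA identifier is inferred from the linked Launchpad page):

```bash
# Add the flatpak PPA referenced above, then install the ostree development headers.
sudo add-apt-repository ppa:alexlarsson/flatpak
sudo apt-get update
sudo apt-get install -y libostree-dev
```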
**Optional**
@@ -152,13 +92,13 @@ Fedora, CentOS, RHEL, and related distributions:
Debian, Ubuntu, and related distributions:
```bash
apt-get install -y \
apt install -y \
libapparmor-dev
```
### Get Source Code
As with other Go projects, CRI-O must be cloned into a directory structure like:
As with other Go projects, cri-o must be cloned into a directory structure like:
```
GOPATH
@@ -192,7 +132,7 @@ make
sudo make install
```
Otherwise, if you do not want to build `CRI-O` with seccomp support you can add `BUILDTAGS=""` when running make.
Otherwise, if you do not want to build `cri-o` with seccomp support you can add `BUILDTAGS=""` when running make.
```bash
make BUILDTAGS=""
@@ -201,7 +141,7 @@ sudo make install
#### Build Tags
`CRI-O` supports optional build tags for compiling support of various features.
`cri-o` supports optional build tags for compiling support of various features.
To add build tags to the make option the `BUILDTAGS` variable must be set.
```bash
@@ -227,26 +167,19 @@ your system.
### Running with kubernetes
You can run a local version of kubernetes with CRI-O using `local-up-cluster.sh`:
You can run a local version of kubernetes with cri-o using `local-up-cluster.sh`:
1. Clone the [kubernetes repository](https://github.com/kubernetes/kubernetes)
1. Start the CRI-O daemon (`crio`)
1. From the kubernetes project directory, run:
```shell
CGROUP_DRIVER=systemd \
CONTAINER_RUNTIME=remote \
CONTAINER_RUNTIME_ENDPOINT='/var/run/crio/crio.sock --runtime-request-timeout=15m' \
./hack/local-up-cluster.sh
```
1. Start the cri-o daemon (`ocid`)
1. From the kubernetes project directory, run: `CONTAINER_RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT='/var/run/ocid.sock --runtime-request-timeout=15m' ./hack/local-up-cluster.sh`
To run a full cluster, see [the instructions](kubernetes.md).
### Current Roadmap
1. Basic pod/container lifecycle, basic image pull (done)
1. Support for tty handling and state management (done)
1. Basic integration with kubelet once client side changes are ready (done)
1. Support for log management, networking integration using CNI, pluggable image/storage management (done)
1. Support for exec/attach (done)
1. Target fully automated kubernetes testing without failures [e2e status](https://github.com/kubernetes-incubator/cri-o/issues/533)
1. Track upstream k8s releases
1. Basic pod/container lifecycle, basic image pull (already works)
1. Support for tty handling and state management
1. Basic integration with kubelet once client side changes are ready
1. Support for log management, networking integration using CNI, pluggable image/storage management
1. Support for exec/attach
1. Target fully automated kubernetes testing without failures


@@ -1,103 +0,0 @@
package client
import (
"encoding/json"
"fmt"
"net"
"net/http"
"syscall"
"time"
"github.com/kubernetes-incubator/cri-o/types"
)
const (
maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
)
// CrioClient is an interface to get information from crio daemon endpoint.
type CrioClient interface {
DaemonInfo() (types.CrioInfo, error)
ContainerInfo(string) (*types.ContainerInfo, error)
}
type crioClientImpl struct {
client *http.Client
crioSocketPath string
}
func configureUnixTransport(tr *http.Transport, proto, addr string) error {
if len(addr) > maxUnixSocketPathSize {
return fmt.Errorf("Unix socket path %q is too long", addr)
}
// No need for compression in local communications.
tr.DisableCompression = true
tr.Dial = func(_, _ string) (net.Conn, error) {
return net.DialTimeout(proto, addr, 32*time.Second)
}
return nil
}
// New returns a crio client
func New(crioSocketPath string) (CrioClient, error) {
tr := new(http.Transport)
configureUnixTransport(tr, "unix", crioSocketPath)
c := &http.Client{
Transport: tr,
}
return &crioClientImpl{
client: c,
crioSocketPath: crioSocketPath,
}, nil
}
func (c *crioClientImpl) getRequest(path string) (*http.Request, error) {
req, err := http.NewRequest("GET", path, nil)
if err != nil {
return nil, err
}
// For local communications over a unix socket, it doesn't matter what
// the host is. We just need a valid and meaningful host name.
req.Host = "crio"
req.URL.Host = c.crioSocketPath
req.URL.Scheme = "http"
return req, nil
}
// DaemonInfo returns cri-o daemon info from the cri-o
// info endpoint.
func (c *crioClientImpl) DaemonInfo() (types.CrioInfo, error) {
info := types.CrioInfo{}
req, err := c.getRequest("/info")
if err != nil {
return info, err
}
resp, err := c.client.Do(req)
if err != nil {
return info, err
}
defer resp.Body.Close()
if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
return info, err
}
return info, nil
}
// ContainerInfo returns container info by querying
// the cri-o container endpoint.
func (c *crioClientImpl) ContainerInfo(id string) (*types.ContainerInfo, error) {
req, err := c.getRequest("/containers/" + id)
if err != nil {
return nil, err
}
resp, err := c.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
cInfo := types.ContainerInfo{}
if err := json.NewDecoder(resp.Body).Decode(&cInfo); err != nil {
return nil, err
}
return &cInfo, nil
}
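Since the client above only issues HTTP GETs over the CRI-O unix socket, the same endpoints can be probed by hand; a rough sketch, assuming the default socket path `/var/run/crio/crio.sock` used elsewhere in this diff (`<container-id>` is a placeholder):

```bash
# Query the daemon info endpoint (what DaemonInfo wraps).
curl --unix-socket /var/run/crio/crio.sock http://crio/info

# Query a single container (what ContainerInfo wraps).
curl --unix-socket /var/run/crio/crio.sock http://crio/containers/<container-id>
```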


@@ -1,194 +0,0 @@
package main
import (
"os"
"text/template"
"github.com/kubernetes-incubator/cri-o/server"
"github.com/urfave/cli"
)
var commentedConfigTemplate = template.Must(template.New("config").Parse(`
# The "crio" table contains all of the server options.
[crio]
# root is a path to the "root directory". CRIO stores all of its data,
# including container images, in this directory.
root = "{{ .Root }}"
# run is a path to the "run directory". CRIO stores all of its state
# in this directory.
runroot = "{{ .RunRoot }}"
# storage_driver select which storage driver is used to manage storage
# of images and containers.
storage_driver = "{{ .Storage }}"
# storage_option is used to pass an option to the storage driver.
storage_option = [
{{ range $opt := .StorageOptions }}{{ printf "\t%q,\n" $opt }}{{ end }}]
# The "crio.api" table contains settings for the kubelet/gRPC interface.
[crio.api]
# listen is the path to the AF_LOCAL socket on which crio will listen.
listen = "{{ .Listen }}"
# stream_address is the IP address on which the stream server will listen
stream_address = "{{ .StreamAddress }}"
# stream_port is the port on which the stream server will listen
stream_port = "{{ .StreamPort }}"
# file_locking is whether file-based locking will be used instead of
# in-memory locking
file_locking = {{ .FileLocking }}
# The "crio.runtime" table contains settings pertaining to the OCI
# runtime used and options for how to set up and manage the OCI runtime.
[crio.runtime]
# runtime is the OCI compatible runtime used for trusted container workloads.
# This is a mandatory setting as this runtime will be the default one
# and will also be used for untrusted container workloads if
# runtime_untrusted_workload is not set.
runtime = "{{ .Runtime }}"
# runtime_untrusted_workload is the OCI compatible runtime used for untrusted
# container workloads. This is an optional setting, except if
# default_container_trust is set to "untrusted".
runtime_untrusted_workload = "{{ .RuntimeUntrustedWorkload }}"
# default_workload_trust is the default level of trust crio puts in container
# workloads. It can either be "trusted" or "untrusted", and the default
# is "trusted".
# Containers can be run through different container runtimes, depending on
# the trust hints we receive from kubelet:
# - If kubelet tags a container workload as untrusted, crio will try first to
# run it through the untrusted container workload runtime. If it is not set,
# crio will use the trusted runtime.
# - If kubelet does not provide any information about the container workload trust
# level, the selected runtime will depend on the default_container_trust setting.
# If it is set to "untrusted", then all containers except for the host privileged
# ones, will be run by the runtime_untrusted_workload runtime. Host privileged
# containers are by definition trusted and will always use the trusted container
# runtime. If default_container_trust is set to "trusted", crio will use the trusted
# container runtime for all containers.
default_workload_trust = "{{ .DefaultWorkloadTrust }}"
# no_pivot instructs the runtime to not use pivot_root, but instead use MS_MOVE
no_pivot = {{ .NoPivot }}
# conmon is the path to conmon binary, used for managing the runtime.
conmon = "{{ .Conmon }}"
# conmon_env is the environment variable list for conmon process,
# used for passing necessary environment variable to conmon or runtime.
conmon_env = [
{{ range $env := .ConmonEnv }}{{ printf "\t%q,\n" $env }}{{ end }}]
# selinux indicates whether or not SELinux will be used for pod
# separation on the host. If you enable this flag, SELinux must be running
# on the host.
selinux = {{ .SELinux }}
# seccomp_profile is the seccomp json profile path which is used as the
# default for the runtime.
seccomp_profile = "{{ .SeccompProfile }}"
# apparmor_profile is the apparmor profile name which is used as the
# default for the runtime.
apparmor_profile = "{{ .ApparmorProfile }}"
# cgroup_manager is the cgroup management implementation to be used
# for the runtime.
cgroup_manager = "{{ .CgroupManager }}"
# hooks_dir_path is the oci hooks directory for automatically executed hooks
hooks_dir_path = "{{ .HooksDirPath }}"
# default_mounts is the mounts list to be mounted for the container when created
default_mounts = [
{{ range $mount := .DefaultMounts }}{{ printf "\t%q, \n" $mount }}{{ end }}]
# pids_limit is the number of processes allowed in a container
pids_limit = {{ .PidsLimit }}
# enable using a shared PID namespace for containers in a pod
enable_shared_pid_namespace = {{ .EnableSharedPIDNamespace }}
# log_size_max is the max limit for the container log size in bytes.
# Negative values indicate that no limit is imposed.
log_size_max = {{ .LogSizeMax }}
# The "crio.image" table contains settings pertaining to the
# management of OCI images.
[crio.image]
# default_transport is the prefix we try prepending to an image name if the
# image name as we receive it can't be parsed as a valid source reference
default_transport = "{{ .DefaultTransport }}"
# pause_image is the image which we use to instantiate infra containers.
pause_image = "{{ .PauseImage }}"
# pause_command is the command to run in a pause_image to have a container just
# sit there. If the image contains the necessary information, this value need
# not be specified.
pause_command = "{{ .PauseCommand }}"
# signature_policy is the name of the file which decides what sort of policy we
# use when deciding whether or not to trust an image that we've pulled.
# Outside of testing situations, it is strongly advised that this be left
# unspecified so that the default system-wide policy will be used.
signature_policy = "{{ .SignaturePolicyPath }}"
# image_volumes controls how image volumes are handled.
# The valid values are mkdir and ignore.
image_volumes = "{{ .ImageVolumes }}"
# insecure_registries is used to skip TLS verification when pulling images.
insecure_registries = [
{{ range $opt := .InsecureRegistries }}{{ printf "\t%q,\n" $opt }}{{ end }}]
# registries is used to specify a comma separated list of registries to be used
# when pulling an unqualified image (e.g. fedora:rawhide).
registries = [
{{ range $opt := .Registries }}{{ printf "\t%q,\n" $opt }}{{ end }}]
# The "crio.network" table contains settings pertaining to the
# management of CNI plugins.
[crio.network]
# network_dir is where CNI network configuration
# files are stored.
network_dir = "{{ .NetworkDir }}"
# plugin_dir is where CNI plugin binaries are stored.
plugin_dir = "{{ .PluginDir }}"
`))
// TODO: Currently ImageDir isn't really used, so we haven't added it to this
// template. Add it once the storage code has been merged.
var configCommand = cli.Command{
Name: "config",
Usage: "generate crio configuration files",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "default",
Usage: "output the default configuration",
},
},
Action: func(c *cli.Context) error {
// At this point, app.Before has already parsed the user's chosen
// config file. So no need to handle that here.
config := c.App.Metadata["config"].(*server.Config)
if c.Bool("default") {
config = server.DefaultConfig()
}
// Output the commented config.
return commentedConfigTemplate.ExecuteTemplate(os.Stdout, "config", config)
},
}
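For reference, this `config` subcommand is what the Makefile in this diff uses to generate the default configuration file; a minimal sketch:

```bash
# Emit the fully commented default configuration (mirrors the crio.conf rule in the Makefile).
./bin/crio --config="" config --default > crio.conf
```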


@@ -1,20 +0,0 @@
// +build linux
package main
import (
systemdDaemon "github.com/coreos/go-systemd/daemon"
"github.com/sirupsen/logrus"
)
func sdNotify() {
if _, err := systemdDaemon.SdNotify(true, "READY=1"); err != nil {
logrus.Warnf("Failed to sd_notify systemd: %v", err)
}
}
// notifySystem sends a message to the host when the server is ready to be used
func notifySystem() {
// Tell the init daemon we are accepting requests
go sdNotify()
}


@@ -1,540 +0,0 @@
package main
import (
"context"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"path/filepath"
"sort"
"strings"
"time"
"github.com/containers/storage/pkg/reexec"
"github.com/kubernetes-incubator/cri-o/lib"
"github.com/kubernetes-incubator/cri-o/server"
"github.com/kubernetes-incubator/cri-o/version"
"github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
"github.com/soheilhy/cmux"
"github.com/urfave/cli"
"golang.org/x/sys/unix"
"google.golang.org/grpc"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// gitCommit is the commit that the binary is being built from.
// It will be populated by the Makefile.
var gitCommit = ""
func validateConfig(config *server.Config) error {
switch config.ImageVolumes {
case lib.ImageVolumesMkdir:
case lib.ImageVolumesIgnore:
case lib.ImageVolumesBind:
default:
return fmt.Errorf("Unrecognized image volume type specified")
}
// This needs to match the read buffer size in conmon
if config.LogSizeMax >= 0 && config.LogSizeMax < 8192 {
return fmt.Errorf("log size max should be negative or >= 8192")
}
return nil
}
func mergeConfig(config *server.Config, ctx *cli.Context) error {
// Don't parse the config if the user explicitly set it to "".
if path := ctx.GlobalString("config"); path != "" {
if err := config.UpdateFromFile(path); err != nil {
if ctx.GlobalIsSet("config") || !os.IsNotExist(err) {
return err
}
// We don't error out if --config wasn't explicitly set and the
// default doesn't exist. But we will log a warning about it, so
// the user doesn't miss it.
logrus.Warnf("default configuration file does not exist: %s", server.CrioConfigPath)
}
}
// Override options set with the CLI.
if ctx.GlobalIsSet("conmon") {
config.Conmon = ctx.GlobalString("conmon")
}
if ctx.GlobalIsSet("pause-command") {
config.PauseCommand = ctx.GlobalString("pause-command")
}
if ctx.GlobalIsSet("pause-image") {
config.PauseImage = ctx.GlobalString("pause-image")
}
if ctx.GlobalIsSet("signature-policy") {
config.SignaturePolicyPath = ctx.GlobalString("signature-policy")
}
if ctx.GlobalIsSet("root") {
config.Root = ctx.GlobalString("root")
}
if ctx.GlobalIsSet("runroot") {
config.RunRoot = ctx.GlobalString("runroot")
}
if ctx.GlobalIsSet("storage-driver") {
config.Storage = ctx.GlobalString("storage-driver")
}
if ctx.GlobalIsSet("storage-opt") {
config.StorageOptions = ctx.GlobalStringSlice("storage-opt")
}
if ctx.GlobalIsSet("file-locking") {
config.FileLocking = ctx.GlobalBool("file-locking")
}
if ctx.GlobalIsSet("insecure-registry") {
config.InsecureRegistries = ctx.GlobalStringSlice("insecure-registry")
}
if ctx.GlobalIsSet("registry") {
config.Registries = ctx.GlobalStringSlice("registry")
}
if ctx.GlobalIsSet("default-transport") {
config.DefaultTransport = ctx.GlobalString("default-transport")
}
if ctx.GlobalIsSet("listen") {
config.Listen = ctx.GlobalString("listen")
}
if ctx.GlobalIsSet("stream-address") {
config.StreamAddress = ctx.GlobalString("stream-address")
}
if ctx.GlobalIsSet("stream-port") {
config.StreamPort = ctx.GlobalString("stream-port")
}
if ctx.GlobalIsSet("runtime") {
config.Runtime = ctx.GlobalString("runtime")
}
if ctx.GlobalIsSet("selinux") {
config.SELinux = ctx.GlobalBool("selinux")
}
if ctx.GlobalIsSet("seccomp-profile") {
config.SeccompProfile = ctx.GlobalString("seccomp-profile")
}
if ctx.GlobalIsSet("apparmor-profile") {
config.ApparmorProfile = ctx.GlobalString("apparmor-profile")
}
if ctx.GlobalIsSet("cgroup-manager") {
config.CgroupManager = ctx.GlobalString("cgroup-manager")
}
if ctx.GlobalIsSet("hooks-dir-path") {
config.HooksDirPath = ctx.GlobalString("hooks-dir-path")
}
if ctx.GlobalIsSet("default-mounts") {
config.DefaultMounts = ctx.GlobalStringSlice("default-mounts")
}
if ctx.GlobalIsSet("pids-limit") {
config.PidsLimit = ctx.GlobalInt64("pids-limit")
}
if ctx.GlobalIsSet("enable-shared-pid-namespace") {
config.EnableSharedPIDNamespace = ctx.GlobalBool("enable-shared-pid-namespace")
}
if ctx.GlobalIsSet("log-size-max") {
config.LogSizeMax = ctx.GlobalInt64("log-size-max")
}
if ctx.GlobalIsSet("cni-config-dir") {
config.NetworkDir = ctx.GlobalString("cni-config-dir")
}
if ctx.GlobalIsSet("cni-plugin-dir") {
config.PluginDir = ctx.GlobalString("cni-plugin-dir")
}
if ctx.GlobalIsSet("image-volumes") {
config.ImageVolumes = lib.ImageVolumesType(ctx.GlobalString("image-volumes"))
}
return nil
}
func catchShutdown(gserver *grpc.Server, sserver *server.Server, hserver *http.Server, signalled *bool) {
sig := make(chan os.Signal, 10)
signal.Notify(sig, unix.SIGINT, unix.SIGTERM)
go func() {
for s := range sig {
switch s {
case unix.SIGINT:
logrus.Debugf("Caught SIGINT")
case unix.SIGTERM:
logrus.Debugf("Caught SIGTERM")
default:
continue
}
*signalled = true
gserver.GracefulStop()
hserver.Shutdown(context.Background())
sserver.StopStreamServer()
sserver.StopExitMonitor()
if err := sserver.Shutdown(); err != nil {
logrus.Warnf("error shutting down main service %v", err)
}
return
}
}()
}
func main() {
if reexec.Init() {
return
}
app := cli.NewApp()
var v []string
v = append(v, version.Version)
if gitCommit != "" {
v = append(v, fmt.Sprintf("commit: %s", gitCommit))
}
app.Name = "crio"
app.Usage = "crio server"
app.Version = strings.Join(v, "\n")
app.Metadata = map[string]interface{}{
"config": server.DefaultConfig(),
}
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "config",
Value: server.CrioConfigPath,
Usage: "path to configuration file",
},
cli.StringFlag{
Name: "conmon",
Usage: "path to the conmon executable",
},
cli.StringFlag{
Name: "listen",
Usage: "path to crio socket",
},
cli.StringFlag{
Name: "stream-address",
Usage: "bind address for streaming socket",
},
cli.StringFlag{
Name: "stream-port",
Usage: "bind port for streaming socket (default: \"10010\")",
},
cli.StringFlag{
Name: "log",
Value: "",
Usage: "set the log file path where internal debug information is written",
},
cli.StringFlag{
Name: "log-format",
Value: "text",
Usage: "set the format used by logs ('text' (default), or 'json')",
},
cli.StringFlag{
Name: "log-level",
Usage: "log messages above specified level: debug, info (default), warn, error, fatal or panic",
},
cli.StringFlag{
Name: "pause-command",
Usage: "name of the pause command in the pause image",
},
cli.StringFlag{
Name: "pause-image",
Usage: "name of the pause image",
},
cli.StringFlag{
Name: "signature-policy",
Usage: "path to signature policy file",
},
cli.StringFlag{
Name: "root",
Usage: "crio root dir",
},
cli.StringFlag{
Name: "runroot",
Usage: "crio state dir",
},
cli.StringFlag{
Name: "storage-driver",
Usage: "storage driver",
},
cli.StringSliceFlag{
Name: "storage-opt",
Usage: "storage driver option",
},
cli.BoolFlag{
Name: "file-locking",
Usage: "enable or disable file-based locking",
},
cli.StringSliceFlag{
Name: "insecure-registry",
Usage: "whether to disable TLS verification for the given registry",
},
cli.StringSliceFlag{
Name: "registry",
Usage: "registry to be prepended when pulling unqualified images, can be specified multiple times",
},
cli.StringFlag{
Name: "default-transport",
Usage: "default transport",
},
cli.StringFlag{
Name: "runtime",
Usage: "OCI runtime path",
},
cli.StringFlag{
Name: "seccomp-profile",
Usage: "default seccomp profile path",
},
cli.StringFlag{
Name: "apparmor-profile",
Usage: "default apparmor profile name (default: \"crio-default\")",
},
cli.BoolFlag{
Name: "selinux",
Usage: "enable selinux support",
},
cli.StringFlag{
Name: "cgroup-manager",
Usage: "cgroup manager (cgroupfs or systemd)",
},
cli.Int64Flag{
Name: "pids-limit",
Value: lib.DefaultPidsLimit,
Usage: "maximum number of processes allowed in a container",
},
cli.BoolFlag{
Name: "enable-shared-pid-namespace",
Usage: "enable using a shared PID namespace for containers in a pod",
},
cli.Int64Flag{
Name: "log-size-max",
Value: lib.DefaultLogSizeMax,
Usage: "maximum log size in bytes for a container",
},
cli.StringFlag{
Name: "cni-config-dir",
Usage: "CNI configuration files directory",
},
cli.StringFlag{
Name: "cni-plugin-dir",
Usage: "CNI plugin binaries directory",
},
cli.StringFlag{
Name: "image-volumes",
Value: string(lib.ImageVolumesMkdir),
Usage: "image volume handling ('mkdir', 'bind', or 'ignore')",
},
cli.StringFlag{
Name: "hooks-dir-path",
Usage: "set the OCI hooks directory path",
Value: lib.DefaultHooksDirPath,
Hidden: true,
},
cli.StringSliceFlag{
Name: "default-mounts",
Usage: "add one or more default mount paths in the form host:container",
Hidden: true,
},
cli.BoolFlag{
Name: "profile",
Usage: "enable pprof remote profiler on localhost:6060",
},
cli.IntFlag{
Name: "profile-port",
Value: 6060,
Usage: "port for the pprof profiler",
},
cli.BoolFlag{
Name: "enable-metrics",
Usage: "enable metrics endpoint for the servier on localhost:9090",
},
cli.IntFlag{
Name: "metrics-port",
Value: 9090,
Usage: "port for the metrics endpoint",
},
}
sort.Sort(cli.FlagsByName(app.Flags))
sort.Sort(cli.FlagsByName(configCommand.Flags))
app.Commands = []cli.Command{
configCommand,
}
app.Before = func(c *cli.Context) error {
// Load the configuration file.
config := c.App.Metadata["config"].(*server.Config)
if err := mergeConfig(config, c); err != nil {
return err
}
if err := validateConfig(config); err != nil {
return err
}
cf := &logrus.TextFormatter{
TimestampFormat: "2006-01-02 15:04:05.000000000Z07:00",
FullTimestamp: true,
}
logrus.SetFormatter(cf)
if loglevel := c.GlobalString("log-level"); loglevel != "" {
level, err := logrus.ParseLevel(loglevel)
if err != nil {
return err
}
logrus.SetLevel(level)
}
if path := c.GlobalString("log"); path != "" {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666)
if err != nil {
return err
}
logrus.SetOutput(f)
}
switch c.GlobalString("log-format") {
case "text":
// retain logrus's default.
case "json":
logrus.SetFormatter(new(logrus.JSONFormatter))
default:
return fmt.Errorf("unknown log-format %q", c.GlobalString("log-format"))
}
return nil
}
app.Action = func(c *cli.Context) error {
if c.GlobalBool("profile") {
profilePort := c.GlobalInt("profile-port")
profileEndpoint := fmt.Sprintf("localhost:%v", profilePort)
go func() {
http.ListenAndServe(profileEndpoint, nil)
}()
}
args := c.Args()
if len(args) > 0 {
for _, command := range app.Commands {
if args[0] == command.Name {
break
}
}
return fmt.Errorf("command %q not supported", args[0])
}
config := c.App.Metadata["config"].(*server.Config)
if !config.SELinux {
selinux.SetDisabled()
}
if _, err := os.Stat(config.Runtime); os.IsNotExist(err) {
// path to runtime does not exist
return fmt.Errorf("invalid --runtime value %q", err)
}
if err := os.MkdirAll(filepath.Dir(config.Listen), 0755); err != nil {
return err
}
// Remove the socket if it already exists
if _, err := os.Stat(config.Listen); err == nil {
if err := os.Remove(config.Listen); err != nil {
logrus.Fatal(err)
}
}
lis, err := net.Listen("unix", config.Listen)
if err != nil {
logrus.Fatalf("failed to listen: %v", err)
}
s := grpc.NewServer()
service, err := server.New(config)
if err != nil {
logrus.Fatal(err)
}
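// When metrics are enabled, expose the metrics endpoint on its own TCP listener.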
if c.GlobalBool("enable-metrics") {
metricsPort := c.GlobalInt("metrics-port")
me, err := service.CreateMetricsEndpoint()
if err != nil {
logrus.Fatalf("Failed to create metrics endpoint: %v", err)
}
l, err := net.Listen("tcp", fmt.Sprintf(":%v", metricsPort))
if err != nil {
logrus.Fatalf("Failed to create listener for metrics: %v", err)
}
go func() {
if err := http.Serve(l, me); err != nil {
logrus.Fatalf("Failed to serve metrics endpoint: %v", err)
}
}()
}
runtime.RegisterRuntimeServiceServer(s, service)
runtime.RegisterImageServiceServer(s, service)
// after the daemon is done setting up we can notify systemd api
notifySystem()
go func() {
service.StartExitMonitor()
}()
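// Multiplex gRPC (CRI) and HTTP (info endpoint) requests over the same listener.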
m := cmux.New(lis)
grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
httpL := m.Match(cmux.HTTP1Fast())
infoMux := service.GetInfoMux()
srv := &http.Server{
Handler: infoMux,
ReadTimeout: 5 * time.Second,
}
graceful := false
catchShutdown(s, service, srv, &graceful)
go s.Serve(grpcL)
go srv.Serve(httpL)
serverCloseCh := make(chan struct{})
go func() {
defer close(serverCloseCh)
if err := m.Serve(); err != nil {
if graceful && strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
err = nil
} else {
logrus.Errorf("Failed to serve grpc request: %v", err)
}
}
}()
streamServerCloseCh := service.StreamingServerCloseChan()
serverExitMonitorCh := service.ExitMonitorCloseChan()
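// Block until any of the servers shuts down, then stop the service and drain the remaining close channels.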
select {
case <-streamServerCloseCh:
case <-serverExitMonitorCh:
case <-serverCloseCh:
}
service.Shutdown()
<-streamServerCloseCh
logrus.Debug("closed stream server")
<-serverExitMonitorCh
logrus.Debug("closed exit monitor")
<-serverCloseCh
logrus.Debug("closed main server")
return nil
}
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}

16
cmd/kpod/README.md Normal file
View file

@ -0,0 +1,16 @@
# kpod - Simple debugging tool for pods and images
kpod is a simple, client-only tool for debugging when daemons such as the CRI runtime or the kubelet are not responding or are failing.
A shared API layer could be created to share code between the daemon and kpod. kpod does not require any daemon to be running. kpod
uses the same underlying components that ocid uses, i.e. containers/image, containers/storage, oci-runtime-tool/generate, runc, or
any other OCI-compatible runtime. Because kpod shares state with ocid, it can debug pods and images created by ocid.
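Because that state lives in containers/storage rather than behind a daemon, a client-only helper can inspect it directly. The snippet below is only an illustrative sketch (it is not part of kpod): it assumes containers/storage's `GetStore`/`Images` API and ocid's default storage directories.
```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage"
)

func main() {
	// Open the same store ocid writes to; the paths are assumed defaults.
	store, err := storage.GetStore(storage.StoreOptions{
		GraphRoot: "/var/lib/containers/storage",
		RunRoot:   "/var/run/containers/storage",
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// List every image known to the shared store.
	images, err := store.Images()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, img := range images {
		fmt.Println(img.ID, img.Names)
	}
}
```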
## Use cases
1. List pods.
2. Launch simple pods (that require no daemon support).
3. Exec commands in a container in a pod.
4. Launch additional containers in a pod.
5. List images.
6. Remove images not in use.
7. Pull images.
8. Check image size.
9. Report pod disk resource usage.

16
cmd/kpod/launch.go Normal file
View file

@ -0,0 +1,16 @@
package main
import (
"fmt"
"github.com/urfave/cli"
)
// TODO implement
var launchCommand = cli.Command{
Name: "launch",
Usage: "launch a pod",
Action: func(context *cli.Context) error {
return fmt.Errorf("this functionality is not yet implemented")
},
}

23
cmd/kpod/main.go Normal file
View file

@ -0,0 +1,23 @@
package main
import (
"os"
"github.com/Sirupsen/logrus"
"github.com/urfave/cli"
)
func main() {
app := cli.NewApp()
app.Name = "kpod"
app.Usage = "manage pods and images"
app.Version = "0.0.1"
app.Commands = []cli.Command{
launchCommand,
}
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}

605
cmd/ocic/container.go Normal file
View file

@ -0,0 +1,605 @@
package main
import (
"fmt"
"log"
"net/url"
"os"
"strings"
"time"
"github.com/urfave/cli"
"golang.org/x/net/context"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
)
var containerCommand = cli.Command{
Name: "container",
Aliases: []string{"ctr"},
Subcommands: []cli.Command{
createContainerCommand,
startContainerCommand,
stopContainerCommand,
removeContainerCommand,
containerStatusCommand,
listContainersCommand,
execSyncCommand,
execCommand,
},
}
type createOptions struct {
// configPath is path to the config for container
configPath string
// name sets the container name
name string
// podID of the container
podID string
// labels for the container
labels map[string]string
}
var createContainerCommand = cli.Command{
Name: "create",
Usage: "create a container",
Flags: []cli.Flag{
cli.StringFlag{
Name: "pod",
Usage: "the id of the pod sandbox to which the container belongs",
},
cli.StringFlag{
Name: "config",
Value: "config.json",
Usage: "the path of a container config file",
},
cli.StringFlag{
Name: "name",
Value: "",
Usage: "the name of the container",
},
cli.StringSliceFlag{
Name: "label",
Usage: "add key=value labels to the container",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
if !context.IsSet("pod") {
return fmt.Errorf("Please specify the id of the pod sandbox to which the container belongs via the --pod option")
}
opts := createOptions{
configPath: context.String("config"),
name: context.String("name"),
podID: context.String("pod"),
labels: make(map[string]string),
}
for _, l := range context.StringSlice("label") {
pair := strings.Split(l, "=")
if len(pair) != 2 {
return fmt.Errorf("incorrectly specified label: %v", l)
}
opts.labels[pair[0]] = pair[1]
}
// Test RuntimeServiceClient.CreateContainer
err = CreateContainer(client, opts)
if err != nil {
return fmt.Errorf("Creating container failed: %v", err)
}
return nil
},
}
var startContainerCommand = cli.Command{
Name: "start",
Usage: "start a container",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "id of the container",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
err = StartContainer(client, context.String("id"))
if err != nil {
return fmt.Errorf("Starting the container failed: %v", err)
}
return nil
},
}
var stopContainerCommand = cli.Command{
Name: "stop",
Usage: "stop a container",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "id of the container",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
err = StopContainer(client, context.String("id"))
if err != nil {
return fmt.Errorf("Stopping the container failed: %v", err)
}
return nil
},
}
var removeContainerCommand = cli.Command{
Name: "remove",
Usage: "remove a container",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "id of the container",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
err = RemoveContainer(client, context.String("id"))
if err != nil {
return fmt.Errorf("Removing the container failed: %v", err)
}
return nil
},
}
var containerStatusCommand = cli.Command{
Name: "status",
Usage: "get the status of a container",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "id of the container",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
err = ContainerStatus(client, context.String("id"))
if err != nil {
return fmt.Errorf("Getting the status of the container failed: %v", err)
}
return nil
},
}
var execSyncCommand = cli.Command{
Name: "execsync",
Usage: "exec a command synchronously in a container",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "id of the container",
},
cli.Int64Flag{
Name: "timeout",
Value: 0,
Usage: "timeout for the command",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
err = ExecSync(client, context.String("id"), context.Args(), context.Int64("timeout"))
if err != nil {
return fmt.Errorf("execing command in container failed: %v", err)
}
return nil
},
}
var execCommand = cli.Command{
Name: "exec",
Usage: "prepare a streaming endpoint to execute a command in the container",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "id of the container",
},
cli.BoolFlag{
Name: "tty",
Usage: "whether to use tty",
},
cli.BoolFlag{
Name: "stdin",
Usage: "whether to stream to stdin",
},
cli.BoolFlag{
Name: "url",
Usage: "do not exec command, just prepare streaming endpoint",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
err = Exec(client, context.String("id"), context.Bool("tty"), context.Bool("stdin"), context.Bool("url"), context.Args())
if err != nil {
return fmt.Errorf("execing command in container failed: %v", err)
}
return nil
},
}
type listOptions struct {
// id of the container
id string
// podID of the container
podID string
// state of the container
state string
// quiet is for listing just container IDs
quiet bool
// labels are selectors for the container
labels map[string]string
}
var listContainersCommand = cli.Command{
Name: "list",
Usage: "list containers",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "quiet",
Usage: "list only container IDs",
},
cli.StringFlag{
Name: "id",
Value: "",
Usage: "filter by container id",
},
cli.StringFlag{
Name: "pod",
Value: "",
Usage: "filter by container pod id",
},
cli.StringFlag{
Name: "state",
Value: "",
Usage: "filter by container state",
},
cli.StringSliceFlag{
Name: "label",
Usage: "filter by key=value label",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
opts := listOptions{
id: context.String("id"),
podID: context.String("pod"),
state: context.String("state"),
quiet: context.Bool("quiet"),
labels: make(map[string]string),
}
for _, l := range context.StringSlice("label") {
pair := strings.Split(l, "=")
if len(pair) != 2 {
return fmt.Errorf("incorrectly specified label: %v", l)
}
opts.labels[pair[0]] = pair[1]
}
err = ListContainers(client, opts)
if err != nil {
return fmt.Errorf("listing containers failed: %v", err)
}
return nil
},
}
// CreateContainer sends a CreateContainerRequest to the server, and parses
// the returned CreateContainerResponse.
func CreateContainer(client pb.RuntimeServiceClient, opts createOptions) error {
config, err := loadContainerConfig(opts.configPath)
if err != nil {
return err
}
// Override the name by the one specified through CLI
if opts.name != "" {
config.Metadata.Name = opts.name
}
for k, v := range opts.labels {
config.Labels[k] = v
}
r, err := client.CreateContainer(context.Background(), &pb.CreateContainerRequest{
PodSandboxId: opts.podID,
Config: config,
// TODO(runcom): this is missing PodSandboxConfig!!!
// we should/could find a way to retrieve it from the fs and set it here
})
if err != nil {
return err
}
fmt.Println(r.ContainerId)
return nil
}
// StartContainer sends a StartContainerRequest to the server, and parses
// the returned StartContainerResponse.
func StartContainer(client pb.RuntimeServiceClient, ID string) error {
if ID == "" {
return fmt.Errorf("ID cannot be empty")
}
_, err := client.StartContainer(context.Background(), &pb.StartContainerRequest{
ContainerId: ID,
})
if err != nil {
return err
}
fmt.Println(ID)
return nil
}
// StopContainer sends a StopContainerRequest to the server, and parses
// the returned StopContainerResponse.
func StopContainer(client pb.RuntimeServiceClient, ID string) error {
if ID == "" {
return fmt.Errorf("ID cannot be empty")
}
_, err := client.StopContainer(context.Background(), &pb.StopContainerRequest{
ContainerId: ID,
})
if err != nil {
return err
}
fmt.Println(ID)
return nil
}
// RemoveContainer sends a RemoveContainerRequest to the server, and parses
// the returned RemoveContainerResponse.
func RemoveContainer(client pb.RuntimeServiceClient, ID string) error {
if ID == "" {
return fmt.Errorf("ID cannot be empty")
}
_, err := client.RemoveContainer(context.Background(), &pb.RemoveContainerRequest{
ContainerId: ID,
})
if err != nil {
return err
}
fmt.Println(ID)
return nil
}
// ContainerStatus sends a ContainerStatusRequest to the server, and parses
// the returned ContainerStatusResponse.
func ContainerStatus(client pb.RuntimeServiceClient, ID string) error {
if ID == "" {
return fmt.Errorf("ID cannot be empty")
}
r, err := client.ContainerStatus(context.Background(), &pb.ContainerStatusRequest{
ContainerId: ID})
if err != nil {
return err
}
fmt.Printf("ID: %s\n", r.Status.Id)
if r.Status.Metadata != nil {
if r.Status.Metadata.Name != "" {
fmt.Printf("Name: %s\n", r.Status.Metadata.Name)
}
fmt.Printf("Attempt: %v\n", r.Status.Metadata.Attempt)
}
// TODO(mzylowski): print it prettier
fmt.Printf("Status: %s\n", r.Status.State)
ctm := time.Unix(0, r.Status.CreatedAt)
fmt.Printf("Created: %v\n", ctm)
stm := time.Unix(0, r.Status.StartedAt)
fmt.Printf("Started: %v\n", stm)
ftm := time.Unix(0, r.Status.FinishedAt)
fmt.Printf("Finished: %v\n", ftm)
fmt.Printf("Exit Code: %v\n", r.Status.ExitCode)
return nil
}
// ExecSync sends an ExecSyncRequest to the server, and parses
// the returned ExecSyncResponse.
func ExecSync(client pb.RuntimeServiceClient, ID string, cmd []string, timeout int64) error {
if ID == "" {
return fmt.Errorf("ID cannot be empty")
}
r, err := client.ExecSync(context.Background(), &pb.ExecSyncRequest{
ContainerId: ID,
Cmd: cmd,
Timeout: timeout,
})
if err != nil {
return err
}
fmt.Println("Stdout:")
fmt.Println(string(r.Stdout))
fmt.Println("Stderr:")
fmt.Println(string(r.Stderr))
fmt.Printf("Exit code: %v\n", r.ExitCode)
return nil
}
// Exec sends an ExecRequest to the server, and parses
// the returned ExecResponse.
func Exec(client pb.RuntimeServiceClient, ID string, tty bool, stdin bool, urlOnly bool, cmd []string) error {
if ID == "" {
return fmt.Errorf("ID cannot be empty")
}
r, err := client.Exec(context.Background(), &pb.ExecRequest{
ContainerId: ID,
Cmd: cmd,
Tty: tty,
Stdin: stdin,
})
if err != nil {
return err
}
if urlOnly {
fmt.Println("URL:")
fmt.Println(r.Url)
return nil
}
execURL, err := url.Parse(r.Url)
if err != nil {
return err
}
streamExec, err := remotecommand.NewExecutor(&restclient.Config{}, "GET", execURL)
if err != nil {
return err
}
options := remotecommand.StreamOptions{
SupportedProtocols: remotecommandserver.SupportedStreamingProtocols,
Stdout: os.Stdout,
Stderr: os.Stderr,
Tty: tty,
}
if stdin {
options.Stdin = os.Stdin
}
return streamExec.Stream(options)
}
// ListContainers sends a ListContainerRequest to the server, and parses
// the returned ListContainerResponse.
func ListContainers(client pb.RuntimeServiceClient, opts listOptions) error {
filter := &pb.ContainerFilter{}
if opts.id != "" {
filter.Id = opts.id
}
if opts.podID != "" {
filter.PodSandboxId = opts.podID
}
if opts.state != "" {
st := &pb.ContainerStateValue{}
st.State = pb.ContainerState_CONTAINER_UNKNOWN
switch opts.state {
case "created":
st.State = pb.ContainerState_CONTAINER_CREATED
filter.State = st
case "running":
st.State = pb.ContainerState_CONTAINER_RUNNING
filter.State = st
case "stopped":
st.State = pb.ContainerState_CONTAINER_EXITED
filter.State = st
default:
log.Fatalf("--state should be one of created, running or stopped")
}
}
if opts.labels != nil {
filter.LabelSelector = opts.labels
}
r, err := client.ListContainers(context.Background(), &pb.ListContainersRequest{
Filter: filter,
})
if err != nil {
return err
}
for _, c := range r.GetContainers() {
if opts.quiet {
fmt.Println(c.Id)
continue
}
fmt.Printf("ID: %s\n", c.Id)
fmt.Printf("Pod: %s\n", c.PodSandboxId)
if c.Metadata != nil {
if c.Metadata.Name != "" {
fmt.Printf("Name: %s\n", c.Metadata.Name)
}
fmt.Printf("Attempt: %v\n", c.Metadata.Attempt)
}
fmt.Printf("Status: %s\n", c.State)
if c.Image != nil {
fmt.Printf("Image: %s\n", c.Image.Image)
}
ctm := time.Unix(0, c.CreatedAt)
fmt.Printf("Created: %v\n", ctm)
if c.Labels != nil {
fmt.Println("Labels:")
for _, k := range getSortedKeys(c.Labels) {
fmt.Printf("\t%s -> %s\n", k, c.Labels[k])
}
}
if c.Annotations != nil {
fmt.Println("Annotations:")
for _, k := range getSortedKeys(c.Annotations) {
fmt.Printf("\t%s -> %s\n", k, c.Annotations[k])
}
}
fmt.Println()
}
return nil
}

173
cmd/ocic/image.go Normal file
View file

@ -0,0 +1,173 @@
package main
import (
"fmt"
"github.com/urfave/cli"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
var imageCommand = cli.Command{
Name: "image",
Subcommands: []cli.Command{
pullImageCommand,
listImageCommand,
imageStatusCommand,
removeImageCommand,
},
}
var pullImageCommand = cli.Command{
Name: "pull",
Usage: "pull an image",
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewImageServiceClient(conn)
_, err = PullImage(client, context.Args().Get(0))
if err != nil {
return fmt.Errorf("pulling image failed: %v", err)
}
return nil
},
}
var listImageCommand = cli.Command{
Name: "list",
Usage: "list images",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "quiet",
Usage: "list only image IDs",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewImageServiceClient(conn)
r, err := ListImages(client, context.Args().Get(0))
if err != nil {
return fmt.Errorf("listing images failed: %v", err)
}
quiet := context.Bool("quiet")
for _, image := range r.Images {
if quiet {
fmt.Printf("%s\n", image.Id)
continue
}
fmt.Printf("ID: %s\n", image.Id)
for _, tag := range image.RepoTags {
fmt.Printf("Tag: %s\n", tag)
}
for _, digest := range image.RepoDigests {
fmt.Printf("Digest: %s\n", digest)
}
if image.Size_ != 0 {
fmt.Printf("Size: %d\n", image.Size_)
}
}
return nil
},
}
var imageStatusCommand = cli.Command{
Name: "status",
Usage: "return the status of an image",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Usage: "id of the image",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewImageServiceClient(conn)
r, err := ImageStatus(client, context.String("id"))
if err != nil {
return fmt.Errorf("image status request failed: %v", err)
}
image := r.Image
if image == nil {
return fmt.Errorf("no such image present")
}
fmt.Printf("ID: %s\n", image.Id)
for _, tag := range image.RepoTags {
fmt.Printf("Tag: %s\n", tag)
}
for _, digest := range image.RepoDigests {
fmt.Printf("Digest: %s\n", digest)
}
fmt.Printf("Size: %d\n", image.Size_)
return nil
},
}
var removeImageCommand = cli.Command{
Name: "remove",
Usage: "remove an image",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "id of the image",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewImageServiceClient(conn)
_, err = RemoveImage(client, context.String("id"))
if err != nil {
return fmt.Errorf("removing the image failed: %v", err)
}
return nil
},
}
// PullImage sends a PullImageRequest to the server, and parses
// the returned PullImageResponse.
func PullImage(client pb.ImageServiceClient, image string) (*pb.PullImageResponse, error) {
return client.PullImage(context.Background(), &pb.PullImageRequest{Image: &pb.ImageSpec{Image: image}})
}
// ListImages sends a ListImagesRequest to the server, and parses
// the returned ListImagesResponse.
func ListImages(client pb.ImageServiceClient, image string) (*pb.ListImagesResponse, error) {
return client.ListImages(context.Background(), &pb.ListImagesRequest{Filter: &pb.ImageFilter{Image: &pb.ImageSpec{Image: image}}})
}
// ImageStatus sends an ImageStatusRequest to the server, and parses
// the returned ImageStatusResponse.
func ImageStatus(client pb.ImageServiceClient, image string) (*pb.ImageStatusResponse, error) {
return client.ImageStatus(context.Background(), &pb.ImageStatusRequest{Image: &pb.ImageSpec{Image: image}})
}
// RemoveImage sends a RemoveImageRequest to the server, and parses
// the returned RemoveImageResponse.
func RemoveImage(client pb.ImageServiceClient, image string) (*pb.RemoveImageResponse, error) {
if image == "" {
return nil, fmt.Errorf("ID cannot be empty")
}
return client.RemoveImage(context.Background(), &pb.RemoveImageRequest{Image: &pb.ImageSpec{Image: image}})
}

95
cmd/ocic/main.go Normal file
View file

@ -0,0 +1,95 @@
package main
import (
"encoding/json"
"fmt"
"net"
"os"
"time"
"github.com/Sirupsen/logrus"
"github.com/urfave/cli"
"google.golang.org/grpc"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
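// getClientConnection dials the socket named by the global --connect flag over a unix connection and returns the gRPC client connection.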
func getClientConnection(context *cli.Context) (*grpc.ClientConn, error) {
conn, err := grpc.Dial(context.GlobalString("connect"), grpc.WithInsecure(), grpc.WithTimeout(context.GlobalDuration("timeout")),
grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout("unix", addr, timeout)
}))
if err != nil {
return nil, fmt.Errorf("failed to connect: %v", err)
}
return conn, nil
}
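// openFile opens the config file at path, returning a clearer error when the file does not exist.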
func openFile(path string) (*os.File, error) {
f, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
return nil, fmt.Errorf("config at %s not found", path)
}
return nil, err
}
return f, nil
}
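// loadPodSandboxConfig decodes a PodSandboxConfig from the JSON file at path.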
func loadPodSandboxConfig(path string) (*pb.PodSandboxConfig, error) {
f, err := openFile(path)
if err != nil {
return nil, err
}
defer f.Close()
var config pb.PodSandboxConfig
if err := json.NewDecoder(f).Decode(&config); err != nil {
return nil, err
}
return &config, nil
}
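// loadContainerConfig decodes a ContainerConfig from the JSON file at path.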
func loadContainerConfig(path string) (*pb.ContainerConfig, error) {
f, err := openFile(path)
if err != nil {
return nil, err
}
defer f.Close()
var config pb.ContainerConfig
if err := json.NewDecoder(f).Decode(&config); err != nil {
return nil, err
}
return &config, nil
}
func main() {
app := cli.NewApp()
app.Name = "ocic"
app.Usage = "client for ocid"
app.Version = "0.3"
app.Commands = []cli.Command{
podSandboxCommand,
containerCommand,
runtimeVersionCommand,
imageCommand,
}
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "connect",
Value: "/var/run/ocid.sock",
Usage: "Socket to connect to",
},
cli.DurationFlag{
Name: "timeout",
Value: 10 * time.Second,
Usage: "Timeout of connecting to server",
},
}
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}

387
cmd/ocic/sandbox.go Normal file
View file

@ -0,0 +1,387 @@
package main
import (
"fmt"
"log"
"sort"
"strings"
"time"
"github.com/urfave/cli"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
var podSandboxCommand = cli.Command{
Name: "pod",
Subcommands: []cli.Command{
runPodSandboxCommand,
stopPodSandboxCommand,
removePodSandboxCommand,
podSandboxStatusCommand,
listPodSandboxCommand,
},
}
var runPodSandboxCommand = cli.Command{
Name: "run",
Usage: "run a pod",
Flags: []cli.Flag{
cli.StringFlag{
Name: "config",
Value: "",
Usage: "the path of a pod sandbox config file",
},
cli.StringFlag{
Name: "name",
Value: "",
Usage: "the name of the pod sandbox",
},
cli.StringSliceFlag{
Name: "label",
Usage: "add key=value labels to the container",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
opts := createOptions{
configPath: context.String("config"),
name: context.String("name"),
labels: make(map[string]string),
}
for _, l := range context.StringSlice("label") {
pair := strings.Split(l, "=")
if len(pair) != 2 {
return fmt.Errorf("incorrectly specified label: %v", l)
}
opts.labels[pair[0]] = pair[1]
}
// Test RuntimeServiceClient.RunPodSandbox
err = RunPodSandbox(client, opts)
if err != nil {
return fmt.Errorf("Creating the pod sandbox failed: %v", err)
}
return nil
},
}
var stopPodSandboxCommand = cli.Command{
Name: "stop",
Usage: "stop a pod sandbox",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "id of the pod sandbox",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
err = StopPodSandbox(client, context.String("id"))
if err != nil {
return fmt.Errorf("stopping the pod sandbox failed: %v", err)
}
return nil
},
}
var removePodSandboxCommand = cli.Command{
Name: "remove",
Usage: "remove a pod sandbox",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "id of the pod sandbox",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
err = RemovePodSandbox(client, context.String("id"))
if err != nil {
return fmt.Errorf("removing the pod sandbox failed: %v", err)
}
return nil
},
}
var podSandboxStatusCommand = cli.Command{
Name: "status",
Usage: "return the status of a pod",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "id of the pod",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
err = PodSandboxStatus(client, context.String("id"))
if err != nil {
return fmt.Errorf("getting the pod sandbox status failed: %v", err)
}
return nil
},
}
var listPodSandboxCommand = cli.Command{
Name: "list",
Usage: "list pod sandboxes",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "filter by pod sandbox id",
},
cli.StringFlag{
Name: "state",
Value: "",
Usage: "filter by pod sandbox state",
},
cli.StringSliceFlag{
Name: "label",
Usage: "filter by key=value label",
},
cli.BoolFlag{
Name: "quiet",
Usage: "list only pod IDs",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
opts := listOptions{
id: context.String("id"),
state: context.String("state"),
quiet: context.Bool("quiet"),
labels: make(map[string]string),
}
for _, l := range context.StringSlice("label") {
pair := strings.Split(l, "=")
if len(pair) != 2 {
return fmt.Errorf("incorrectly specified label: %v", l)
}
opts.labels[pair[0]] = pair[1]
}
err = ListPodSandboxes(client, opts)
if err != nil {
return fmt.Errorf("listing pod sandboxes failed: %v", err)
}
return nil
},
}
// RunPodSandbox sends a RunPodSandboxRequest to the server, and parses
// the returned RunPodSandboxResponse.
func RunPodSandbox(client pb.RuntimeServiceClient, opts createOptions) error {
config, err := loadPodSandboxConfig(opts.configPath)
if err != nil {
return err
}
// Override the name by the one specified through CLI
if opts.name != "" {
config.Metadata.Name = opts.name
}
for k, v := range opts.labels {
config.Labels[k] = v
}
r, err := client.RunPodSandbox(context.Background(), &pb.RunPodSandboxRequest{Config: config})
if err != nil {
return err
}
fmt.Println(r.PodSandboxId)
return nil
}
// StopPodSandbox sends a StopPodSandboxRequest to the server, and parses
// the returned StopPodSandboxResponse.
func StopPodSandbox(client pb.RuntimeServiceClient, ID string) error {
if ID == "" {
return fmt.Errorf("ID cannot be empty")
}
_, err := client.StopPodSandbox(context.Background(), &pb.StopPodSandboxRequest{PodSandboxId: ID})
if err != nil {
return err
}
fmt.Println(ID)
return nil
}
// RemovePodSandbox sends a RemovePodSandboxRequest to the server, and parses
// the returned RemovePodSandboxResponse.
func RemovePodSandbox(client pb.RuntimeServiceClient, ID string) error {
if ID == "" {
return fmt.Errorf("ID cannot be empty")
}
_, err := client.RemovePodSandbox(context.Background(), &pb.RemovePodSandboxRequest{PodSandboxId: ID})
if err != nil {
return err
}
fmt.Println(ID)
return nil
}
// PodSandboxStatus sends a PodSandboxStatusRequest to the server, and parses
// the returned PodSandboxStatusResponse.
func PodSandboxStatus(client pb.RuntimeServiceClient, ID string) error {
if ID == "" {
return fmt.Errorf("ID cannot be empty")
}
r, err := client.PodSandboxStatus(context.Background(), &pb.PodSandboxStatusRequest{PodSandboxId: ID})
if err != nil {
return err
}
fmt.Printf("ID: %s\n", r.Status.Id)
if r.Status.Metadata != nil {
if r.Status.Metadata.Name != "" {
fmt.Printf("Name: %s\n", r.Status.Metadata.Name)
}
if r.Status.Metadata.Uid != "" {
fmt.Printf("UID: %s\n", r.Status.Metadata.Uid)
}
if r.Status.Metadata.Namespace != "" {
fmt.Printf("Namespace: %s\n", r.Status.Metadata.Namespace)
}
fmt.Printf("Attempt: %v\n", r.Status.Metadata.Attempt)
}
fmt.Printf("Status: %s\n", r.Status.State)
ctm := time.Unix(0, r.Status.CreatedAt)
fmt.Printf("Created: %v\n", ctm)
fmt.Printf("Network namespace: %s\n", r.Status.Linux.Namespaces.Network)
if r.Status.Network != nil {
fmt.Printf("IP Address: %v\n", r.Status.Network.Ip)
}
if r.Status.Labels != nil {
fmt.Println("Labels:")
for _, k := range getSortedKeys(r.Status.Labels) {
fmt.Printf("\t%s -> %s\n", k, r.Status.Labels[k])
}
}
if r.Status.Annotations != nil {
fmt.Println("Annotations:")
for _, k := range getSortedKeys(r.Status.Annotations) {
fmt.Printf("\t%s -> %s\n", k, r.Status.Annotations[k])
}
}
return nil
}
// ListPodSandboxes sends a ListPodSandboxRequest to the server, and parses
// the returned ListPodSandboxResponse.
func ListPodSandboxes(client pb.RuntimeServiceClient, opts listOptions) error {
filter := &pb.PodSandboxFilter{}
if opts.id != "" {
filter.Id = opts.id
}
if opts.state != "" {
st := &pb.PodSandboxStateValue{}
st.State = pb.PodSandboxState_SANDBOX_NOTREADY
switch opts.state {
case "ready":
st.State = pb.PodSandboxState_SANDBOX_READY
filter.State = st
case "notready":
st.State = pb.PodSandboxState_SANDBOX_NOTREADY
filter.State = st
default:
log.Fatalf("--state should be ready or notready")
}
}
if opts.labels != nil {
filter.LabelSelector = opts.labels
}
r, err := client.ListPodSandbox(context.Background(), &pb.ListPodSandboxRequest{
Filter: filter,
})
if err != nil {
return err
}
for _, pod := range r.Items {
if opts.quiet {
fmt.Println(pod.Id)
continue
}
fmt.Printf("ID: %s\n", pod.Id)
if pod.Metadata != nil {
if pod.Metadata.Name != "" {
fmt.Printf("Name: %s\n", pod.Metadata.Name)
}
if pod.Metadata.Uid != "" {
fmt.Printf("UID: %s\n", pod.Metadata.Uid)
}
if pod.Metadata.Namespace != "" {
fmt.Printf("Namespace: %s\n", pod.Metadata.Namespace)
}
fmt.Printf("Attempt: %v\n", pod.Metadata.Attempt)
}
fmt.Printf("Status: %s\n", pod.State)
ctm := time.Unix(0, pod.CreatedAt)
fmt.Printf("Created: %v\n", ctm)
if pod.Labels != nil {
fmt.Println("Labels:")
for _, k := range getSortedKeys(pod.Labels) {
fmt.Printf("\t%s -> %s\n", k, pod.Labels[k])
}
}
if pod.Annotations != nil {
fmt.Println("Annotations:")
for _, k := range getSortedKeys(pod.Annotations) {
fmt.Printf("\t%s -> %s\n", k, pod.Annotations[k])
}
}
fmt.Println()
}
return nil
}
func getSortedKeys(m map[string]string) []string {
var keys []string
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}

41
cmd/ocic/system.go Normal file
View file

@ -0,0 +1,41 @@
package main
import (
"fmt"
"github.com/urfave/cli"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
var runtimeVersionCommand = cli.Command{
Name: "runtimeversion",
Usage: "get runtime version information",
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
// Test RuntimeServiceClient.Version
version := "v1alpha1"
err = Version(client, version)
if err != nil {
return fmt.Errorf("Getting the runtime version failed: %v", err)
}
return nil
},
}
// Version sends a VersionRequest to the server, and parses the returned VersionResponse.
func Version(client pb.RuntimeServiceClient, version string) error {
r, err := client.Version(context.Background(), &pb.VersionRequest{Version: version})
if err != nil {
return err
}
fmt.Printf("VersionResponse: Version: %s, RuntimeName: %s, RuntimeVersion: %s, RuntimeApiVersion: %s\n", r.Version, r.RuntimeName, r.RuntimeVersion, r.RuntimeApiVersion)
return nil
}

133
cmd/ocid/config.go Normal file
View file

@ -0,0 +1,133 @@
package main
import (
"os"
"text/template"
"github.com/kubernetes-incubator/cri-o/server"
"github.com/urfave/cli"
)
var commentedConfigTemplate = template.Must(template.New("config").Parse(`
# The "ocid" table contains all of the server options.
[ocid]
# root is a path to the "root directory". OCID stores all of its data,
# including container images, in this directory.
root = "{{ .Root }}"
# run is a path to the "run directory". OCID stores all of its state
# in this directory.
runroot = "{{ .RunRoot }}"
# storage_driver selects which storage driver is used to manage the storage
# of images and containers.
storage_driver = "{{ .Storage }}"
# storage_option is used to pass an option to the storage driver.
storage_option = [
{{ range $opt := .StorageOptions }}{{ printf "\t%q,\n" $opt }}{{ end }}]
# The "ocid.api" table contains settings for the kubelet/gRPC
# interface (which is also used by ocic).
[ocid.api]
# listen is the path to the AF_LOCAL socket on which ocid will listen.
listen = "{{ .Listen }}"
# The "ocid.runtime" table contains settings pertaining to the OCI
# runtime used and options for how to set up and manage the OCI runtime.
[ocid.runtime]
# runtime is a path to the OCI runtime which ocid will be using.
runtime = "{{ .Runtime }}"
# runtime_host_privileged is a path to the OCI runtime which ocid
# will be using for host privileged operations.
# If this string is empty, ocid will use the "runtime"
# for all operations.
runtime_host_privileged = "{{ .RuntimeHostPrivileged }}"
# conmon is the path to conmon binary, used for managing the runtime.
conmon = "{{ .Conmon }}"
# conmon_env is the environment variable list for conmon process,
# used for passing necessary environment variable to conmon or runtime.
conmon_env = [
{{ range $env := .ConmonEnv }}{{ printf "\t%q,\n" $env }}{{ end }}]
# selinux indicates whether or not SELinux will be used for pod
# separation on the host. If you enable this flag, SELinux must be running
# on the host.
selinux = {{ .SELinux }}
# seccomp_profile is the seccomp json profile path which is used as the
# default for the runtime.
seccomp_profile = "{{ .SeccompProfile }}"
# apparmor_profile is the apparmor profile name which is used as the
# default for the runtime.
apparmor_profile = "{{ .ApparmorProfile }}"
# cgroup_manager is the cgroup management implementation to be used
# for the runtime.
cgroup_manager = "{{ .CgroupManager }}"
# The "ocid.image" table contains settings pertaining to the
# management of OCI images.
[ocid.image]
# default_transport is the prefix we try prepending to an image name if the
# image name as we receive it can't be parsed as a valid source reference
default_transport = "{{ .DefaultTransport }}"
# pause_image is the image which we use to instantiate infra containers.
pause_image = "{{ .PauseImage }}"
# pause_command is the command to run in a pause_image to have a container just
# sit there. If the image contains the necessary information, this value need
# not be specified.
pause_command = "{{ .PauseCommand }}"
# signature_policy is the name of the file which decides what sort of policy we
# use when deciding whether or not to trust an image that we've pulled.
# Outside of testing situations, it is strongly advised that this be left
# unspecified so that the default system-wide policy will be used.
signature_policy = "{{ .SignaturePolicyPath }}"
# The "ocid.network" table contains settings pertaining to the
# management of CNI plugins.
[ocid.network]
# network_dir is where CNI network configuration
# files are stored.
network_dir = "{{ .NetworkDir }}"
# plugin_dir is where CNI plugin binaries are stored.
plugin_dir = "{{ .PluginDir }}"
`))
// TODO: Currently ImageDir isn't really used, so we haven't added it to this
// template. Add it once the storage code has been merged.
var configCommand = cli.Command{
Name: "config",
Usage: "generate ocid configuration files",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "default",
Usage: "output the default configuration",
},
},
Action: func(c *cli.Context) error {
// At this point, app.Before has already parsed the user's chosen
// config file. So no need to handle that here.
config := c.App.Metadata["config"].(*server.Config)
if c.Bool("default") {
config = server.DefaultConfig()
}
// Output the commented config.
return commentedConfigTemplate.ExecuteTemplate(os.Stdout, "config", config)
},
}

20
cmd/ocid/daemon_linux.go Normal file
View file

@ -0,0 +1,20 @@
// +build linux
package main
import (
"github.com/Sirupsen/logrus"
systemdDaemon "github.com/coreos/go-systemd/daemon"
)
func sdNotify() {
if _, err := systemdDaemon.SdNotify(true, "READY=1"); err != nil {
logrus.Warnf("Failed to sd_notify systemd: %v", err)
}
}
// notifySystem sends a message to the host when the server is ready to be used
func notifySystem() {
// Tell the init daemon we are accepting requests
go sdNotify()
}

331
cmd/ocid/main.go Normal file
View file

@ -0,0 +1,331 @@
package main
import (
"fmt"
"net"
"os"
"os/signal"
"sort"
"strings"
"syscall"
"runtime/pprof"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/pkg/reexec"
"github.com/kubernetes-incubator/cri-o/server"
"github.com/opencontainers/selinux/go-selinux"
"github.com/urfave/cli"
"google.golang.org/grpc"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
const ociConfigPath = "/etc/ocid/ocid.conf"
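// mergeConfig loads the configuration file (if present) and then applies any overrides set on the command line.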
func mergeConfig(config *server.Config, ctx *cli.Context) error {
// Don't parse the config if the user explicitly set it to "".
if path := ctx.GlobalString("config"); path != "" {
if err := config.FromFile(path); err != nil {
if ctx.GlobalIsSet("config") || !os.IsNotExist(err) {
return err
}
// We don't error out if --config wasn't explicitly set and the
// default doesn't exist. But we will log a warning about it, so
// the user doesn't miss it.
logrus.Warnf("default configuration file does not exist: %s", ociConfigPath)
}
}
// Override options set with the CLI.
if ctx.GlobalIsSet("conmon") {
config.Conmon = ctx.GlobalString("conmon")
}
if ctx.GlobalIsSet("pause-command") {
config.PauseCommand = ctx.GlobalString("pause-command")
}
if ctx.GlobalIsSet("pause-image") {
config.PauseImage = ctx.GlobalString("pause-image")
}
if ctx.GlobalIsSet("signature-policy") {
config.SignaturePolicyPath = ctx.GlobalString("signature-policy")
}
if ctx.GlobalIsSet("root") {
config.Root = ctx.GlobalString("root")
}
if ctx.GlobalIsSet("runroot") {
config.RunRoot = ctx.GlobalString("runroot")
}
if ctx.GlobalIsSet("storage-driver") {
config.Storage = ctx.GlobalString("storage-driver")
}
if ctx.GlobalIsSet("storage-opt") {
config.StorageOptions = ctx.GlobalStringSlice("storage-opt")
}
if ctx.GlobalIsSet("default-transport") {
config.DefaultTransport = ctx.GlobalString("default-transport")
}
if ctx.GlobalIsSet("listen") {
config.Listen = ctx.GlobalString("listen")
}
if ctx.GlobalIsSet("runtime") {
config.Runtime = ctx.GlobalString("runtime")
}
if ctx.GlobalIsSet("selinux") {
config.SELinux = ctx.GlobalBool("selinux")
}
if ctx.GlobalIsSet("seccomp-profile") {
config.SeccompProfile = ctx.GlobalString("seccomp-profile")
}
if ctx.GlobalIsSet("apparmor-profile") {
config.ApparmorProfile = ctx.GlobalString("apparmor-profile")
}
if ctx.GlobalIsSet("cgroup-manager") {
config.CgroupManager = ctx.GlobalString("cgroup-manager")
}
if ctx.GlobalIsSet("cni-config-dir") {
config.NetworkDir = ctx.GlobalString("cni-config-dir")
}
if ctx.GlobalIsSet("cni-plugin-dir") {
config.PluginDir = ctx.GlobalString("cni-plugin-dir")
}
return nil
}
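// catchShutdown waits for SIGINT or SIGTERM, records that a signal was received, and gracefully stops the gRPC server.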
func catchShutdown(gserver *grpc.Server, sserver *server.Server, signalled *bool) {
sig := make(chan os.Signal, 10)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
go func() {
for s := range sig {
switch s {
case syscall.SIGINT:
logrus.Debugf("Caught SIGINT")
case syscall.SIGTERM:
logrus.Debugf("Caught SIGTERM")
default:
continue
}
*signalled = true
gserver.GracefulStop()
return
}
}()
}
func main() {
if reexec.Init() {
return
}
app := cli.NewApp()
app.Name = "ocid"
app.Usage = "ocid server"
app.Version = "0.3"
app.Metadata = map[string]interface{}{
"config": server.DefaultConfig(),
}
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "config",
Value: ociConfigPath,
Usage: "path to configuration file",
},
cli.StringFlag{
Name: "conmon",
Usage: "path to the conmon executable",
},
cli.BoolFlag{
Name: "debug",
Usage: "enable debug output for logging",
},
cli.StringFlag{
Name: "listen",
Usage: "path to ocid socket",
},
cli.StringFlag{
Name: "log",
Value: "",
Usage: "set the log file path where internal debug information is written",
},
cli.StringFlag{
Name: "log-format",
Value: "text",
Usage: "set the format used by logs ('text' (default), or 'json')",
},
cli.StringFlag{
Name: "pause-command",
Usage: "name of the pause command in the pause image",
},
cli.StringFlag{
Name: "pause-image",
Usage: "name of the pause image",
},
cli.StringFlag{
Name: "signature-policy",
Usage: "path to signature policy file",
},
cli.StringFlag{
Name: "root",
Usage: "ocid root dir",
},
cli.StringFlag{
Name: "runroot",
Usage: "ocid state dir",
},
cli.StringFlag{
Name: "storage-driver",
Usage: "storage driver",
},
cli.StringSliceFlag{
Name: "storage-opt",
Usage: "storage driver option",
},
cli.StringFlag{
Name: "default-transport",
Usage: "default transport",
},
cli.StringFlag{
Name: "runtime",
Usage: "OCI runtime path",
},
cli.StringFlag{
Name: "seccomp-profile",
Usage: "default seccomp profile path",
},
cli.StringFlag{
Name: "apparmor-profile",
Usage: "default apparmor profile name (default: \"ocid-default\")",
},
cli.BoolFlag{
Name: "selinux",
Usage: "enable selinux support",
},
cli.StringFlag{
Name: "cgroup-manager",
Usage: "cgroup manager (cgroupfs or systemd)",
},
cli.StringFlag{
Name: "cni-config-dir",
Usage: "CNI configuration files directory",
},
cli.StringFlag{
Name: "cni-plugin-dir",
Usage: "CNI plugin binaries directory",
},
cli.StringFlag{
Name: "cpu-profile",
Usage: "set the CPU profile file path",
},
}
sort.Sort(cli.FlagsByName(app.Flags))
sort.Sort(cli.FlagsByName(configCommand.Flags))
app.Commands = []cli.Command{
configCommand,
}
app.Before = func(c *cli.Context) error {
// Load the configuration file.
config := c.App.Metadata["config"].(*server.Config)
if err := mergeConfig(config, c); err != nil {
return err
}
cf := &logrus.TextFormatter{
TimestampFormat: "2006-01-02 15:04:05.000000000Z07:00",
FullTimestamp: true,
}
logrus.SetFormatter(cf)
if c.GlobalBool("debug") {
logrus.SetLevel(logrus.DebugLevel)
}
if path := c.GlobalString("log"); path != "" {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666)
if err != nil {
return err
}
logrus.SetOutput(f)
}
switch c.GlobalString("log-format") {
case "text":
// retain logrus's default.
case "json":
logrus.SetFormatter(new(logrus.JSONFormatter))
default:
return fmt.Errorf("unknown log-format %q", c.GlobalString("log-format"))
}
return nil
}
app.Action = func(c *cli.Context) error {
if cp := c.GlobalString("cpu-profile"); cp != "" {
f, err := os.Create(cp)
if err != nil {
return fmt.Errorf("invalid --cpu-profile value %q", err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
config := c.App.Metadata["config"].(*server.Config)
if !config.SELinux {
selinux.SetDisabled()
}
if _, err := os.Stat(config.Runtime); os.IsNotExist(err) {
// path to runtime does not exist
return fmt.Errorf("invalid --runtime value %q", err)
}
// Remove the socket if it already exists
if _, err := os.Stat(config.Listen); err == nil {
if err := os.Remove(config.Listen); err != nil {
logrus.Fatal(err)
}
}
lis, err := net.Listen("unix", config.Listen)
if err != nil {
logrus.Fatalf("failed to listen: %v", err)
}
s := grpc.NewServer()
service, err := server.New(config)
if err != nil {
logrus.Fatal(err)
}
graceful := false
catchShutdown(s, service, &graceful)
runtime.RegisterRuntimeServiceServer(s, service)
runtime.RegisterImageServiceServer(s, service)
// after the daemon is done setting up we can notify systemd api
notifySystem()
err = s.Serve(lis)
if graceful && strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
err = nil
}
if err2 := service.Shutdown(); err2 != nil {
logrus.Infof("error shutting down layer storage: %v", err2)
}
if err != nil {
logrus.Fatal(err)
}
return nil
}
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}

View file

@ -1,3 +1,55 @@
# Kubernetes Community Code of Conduct
## Kubernetes Community Code of Conduct
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
### Contributor Code of Conduct
As contributors and maintainers of this project, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.
We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information, such as physical or electronic addresses,
without explicit permission
* Other unethical or unprofessional conduct.
Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are not
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
commit themselves to fairly and consistently applying these principles to every aspect
of managing this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.
This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting one or more maintainers.
This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at
http://contributor-covenant.org/version/1/2/0/
### Kubernetes Events Code of Conduct
Kubernetes events are working conferences intended for professional networking and collaboration in the
Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
with their employer's policies on appropriate workplace behavior.
While at Kubernetes events or related social networking opportunities, attendees should not engage in
discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
be especially aware of these concerns.
The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
be engaging in discriminatory or offensive speech or actions.
Please bring any concerns to the immediate attention of Kubernetes event staff.

89
completions/bash/kpod Normal file
View file

@ -0,0 +1,89 @@
#! /bin/bash
: ${PROG:=$(basename ${BASH_SOURCE})}
_complete_() {
local options_with_args=$1
local boolean_options="$2 -h --help"
case "$prev" in
$options_with_args)
return
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
;;
esac
}
_kpod_launch() {
local options_with_args="
"
local boolean_options="
"
_complete_ "$options_with_args" "$boolean_options"
}
_kpod_kpod() {
local options_with_args="
"
local boolean_options="
--version -v
--help -h
"
commands="
launch
"
case "$prev" in
$main_options_with_args_glob )
return
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
;;
*)
COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
;;
esac
}
_cli_bash_autocomplete() {
local cur opts base
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
COMPREPLY=()
local cur prev words cword
_get_comp_words_by_ref -n : cur prev words cword
local command=${PROG} cpos=0
local counter=1
counter=1
while [ $counter -lt $cword ]; do
case "!${words[$counter]}" in
*)
command=$(echo "${words[$counter]}" | sed 's/-/_/g')
cpos=$counter
(( cpos++ ))
break
;;
esac
(( counter++ ))
done
local completions_func=_kpod_${command}
declare -F $completions_func >/dev/null && $completions_func
eval "$previous_extglob_setting"
return 0
}
complete -F _cli_bash_autocomplete $PROG

View file

@ -2,11 +2,11 @@ src = $(wildcard *.c)
obj = $(src:.c=.o)
override LIBS += $(shell pkg-config --libs glib-2.0)
override CFLAGS += -std=c99 -Os -Wall -Wextra $(shell pkg-config --cflags glib-2.0)
override CFLAGS += -std=c99 -Wall -Wextra $(shell pkg-config --cflags glib-2.0)
conmon: $(obj)
$(CC) -o ../bin/$@ $^ $(CFLAGS) $(LIBS)
$(CC) -o $@ $^ $(CFLAGS) $(LIBS)
.PHONY: clean
clean:
rm -f $(obj) ../bin/conmon
rm -f $(obj) conmon

File diff suppressed because it is too large

View file

@ -1,15 +0,0 @@
{
"cniVersion": "0.2.0",
"name": "crio-bridge",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "10.88.0.0/16",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
}

View file

@ -0,0 +1,15 @@
{
"cniVersion": "0.2.0",
"name": "ocid-bridge",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "10.88.0.0/16",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
}

View file

@ -2,15 +2,15 @@
There are a wide variety of different [CNI][cni] network configurations. This
directory just contains some example configurations that can be used as the
basis for your own configurations (distributions should package these files in
basis for your own configurations (distibutions should package these files in
example directories).
To use these configurations, place them in `/etc/cni/net.d` (or the directory
specified by `crio.network.network_dir` in your `crio.conf`).
specified by `ocid.network.network_dir` in your `ocid.conf`).
In addition, you need to install the [CNI plugins][cni] necessary into
`/opt/cni/bin` (or the directory specified by `crio.network.plugin_dir`). The
`/opt/cni/bin` (or the directory specified by `ocid.network.plugin_dir`). The
two plugins necessary for the example CNI configurations are `loopback` and
`bridge`.
[cni]: https://github.com/containernetworking/plugins
[cni]: https://github.com/containernetworking/cni

14
contrib/rpm/Makefile Normal file
View file

@ -0,0 +1,14 @@
.PHONY: dist
dist: ocid.spec
spectool -g ocid.spec
.PHONY: rpm
rpm: dist
rpmbuild --define "_sourcedir `pwd`" --define "_specdir `pwd`" \
--define "_rpmdir `pwd`" --define "_srcrpmdir `pwd`" -ba ocid.spec
all: rpm
clean:
rm -f *rpm *gz
rm -rf x86_64

71
contrib/rpm/ocid.spec Normal file
View file

@ -0,0 +1,71 @@
%define debug_package %{nil}
%global provider github
%global provider_tld com
%global project kubernetes-incubator
%global repo cri-o
%global Name ocid
# https://github.com/kubernetes-incubator/cri-o
%global provider_prefix %{provider}.%{provider_tld}/%{project}/%{repo}
%global import_path %{provider_prefix}
%global commit 8ba639952a95f2e24cc98987689138b67545576c
%global shortcommit %(c=%{commit}; echo ${c:0:7})
Name: %{Name}
Version: 0.0.1
Release: 1.git%{shortcommit}%{?dist}
Summary: Kubelet Container Runtime Interface (CRI) for OCI runtimes.
Group: Applications/Text
License: Apache 2.0
URL: https://%{provider_prefix}
Source0: https://%{provider_prefix}/archive/%{commit}/%{repo}-%{shortcommit}.tar.gz
BuildRequires: golang-github-cpuguy83-go-md2man
%description
The ocid package provides an implementation of the
Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes.
ocid provides the following functionality:
Support multiple image formats including the existing Docker image format
Support for multiple means to download images including trust & image verification
Container image management (managing image layers, overlay filesystems, etc)
Container process lifecycle management
Monitoring and logging required to satisfy the CRI
Resource isolation as required by the CRI
%prep
%setup -q -n %{repo}-%{commit}
%build
make all
%install
%make_install
%make_install install.systemd
#define license tag if not already defined
%{!?_licensedir:%global license %doc}
%files
%{_bindir}/ocid
%{_bindir}/ocic
%{_mandir}/man5/ocid.conf.5*
%{_mandir}/man8/ocid.8*
%{_sysconfdir}/ocid.conf
%dir /%{_libexecdir}/ocid
/%{_libexecdir}/ocid/conmon
/%{_libexecdir}/ocid/pause
%{_unitdir}/ocid.service
%doc README.md
%license LICENSE
%preun
%systemd_preun %{Name}
%postun
%systemd_postun_with_restart %{Name}
%changelog
* Mon Oct 31 2016 Dan Walsh <dwalsh@redhat.com> - 0.0.1
- Initial RPM release

View file

@ -1,29 +0,0 @@
FROM centos
ENV VERSION=0 RELEASE=1 ARCH=x86_64
LABEL com.redhat.component="cri-o" \
name="$FGC/cri-o" \
version="$VERSION" \
release="$RELEASE.$DISTTAG" \
architecture="$ARCH" \
usage="atomic install --system --system-package=no crio && systemctl start crio" \
summary="The cri-o daemon as a system container." \
maintainer="Yu Qi Zhang <jzehrarnyg@gmail.com>" \
atomic.type="system"
RUN yum-config-manager --nogpgcheck --add-repo https://cbs.centos.org/repos/virt7-container-common-candidate/x86_64/os/ && \
yum install --disablerepo=extras --nogpgcheck --setopt=tsflags=nodocs -y iptables cri-o socat iproute runc && \
rpm -V iptables cri-o iproute runc && \
yum clean all && \
mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \
cp /etc/crio/* /exports/hostfs/etc/crio && \
if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi
RUN sed -i '/storage_option =/s/.*/&\n"overlay.override_kernel_check=1",/' /exports/hostfs/etc/crio/crio.conf
COPY manifest.json tmpfiles.template config.json.template service.template /exports/
COPY set_mounts.sh /
COPY run.sh /usr/bin/
CMD ["/usr/bin/run.sh"]

View file

@ -1,57 +0,0 @@
# cri-o
This is the cri-o daemon as a system container.
## Building the image from source:
```
# git clone https://github.com/projectatomic/atomic-system-containers
# cd atomic-system-containers/cri-o
# docker build -t crio .
```
## Running the system container with the atomic CLI:
Pull from registry into ostree:
```
# atomic pull --storage ostree $REGISTRY/crio
```
Alternatively, pull from local docker:
```
# atomic pull --storage ostree docker:crio:latest
```
Install the container:
Currently we recommend using --system-package=no to avoid having rpmbuild create an rpm file
during installation. This flag will tell the atomic CLI to fall back to copying files to the
host instead.
```
# atomic install --system --system-package=no --name=crio ($REGISTRY)/crio
```
Start as a systemd service:
```
# systemctl start crio
```
Stop the service:
```
# systemctl stop crio
```
Remove the container:
```
# atomic uninstall crio
```
## Binary version
You can find the image automatically built as: registry.centos.org/projectatomic/cri-o:latest
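For that prebuilt image, the same atomic workflow applies; a minimal sketch, assuming the registry is reachable from the host:

```
# atomic pull --storage ostree registry.centos.org/projectatomic/cri-o:latest
# atomic install --system --system-package=no --name=crio registry.centos.org/projectatomic/cri-o:latest
# systemctl start crio
```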

View file

@ -1,41 +0,0 @@
# This is for the purpose of building containers on the CentOS Community Container
# Pipeline. The containers are built, tested and delivered to registry.centos.org and
# lifecycled as well. A corresponding entry must exist in the container index itself,
# located at https://github.com/CentOS/container-index/tree/master/index.d
# You can know more at the following links:
# * https://github.com/CentOS/container-pipeline-service/blob/master/README.md
# * https://github.com/CentOS/container-index/blob/master/README.rst
# * https://wiki.centos.org/ContainerPipeline
# This will be part of the name of the container. It should match the job-id in index entry
job-id: cri-o
#the following are optional, can be left blank
#defaults, where applicable are filled in
#nulecule-file : nulecule
# This flag tells the container pipeline to skip user defined tests on their container
test-skip : True
# This is path of the script that initiates the user defined tests. It must be able to
# return an exit code.
test-script : null
# This is the path of custom build script.
build-script : null
# This is the path of the custom delivery script
delivery-script : null
# This flag tells the pipeline to deliver this container to docker hub.
docker-index : True
# This flag can be used to enable or disable the custom delivery
custom-delivery : False
# This flag can be used to enable or disable delivery of container to local registry
local-delivery : True
Upstreams :
- ref :
url :

View file

@ -1,427 +0,0 @@
{
"ociVersion": "1.0.0",
"platform": {
"arch": "amd64",
"os": "linux"
},
"process": {
"args": [
"/usr/bin/run.sh"
],
"capabilities": {
"ambient": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND"
],
"bounding": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND"
],
"effective": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND"
],
"inheritable": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND"
],
"permitted": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND"
]
},
"selinuxLabel": "system_u:system_r:container_runtime_t:s0",
"cwd": "/",
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin",
"TERM=xterm",
"LOG_LEVEL=$LOG_LEVEL",
"NAME=$NAME"
],
"noNewPrivileges": false,
"terminal": false,
"user": {
"gid": 0,
"uid": 0
}
},
"root": {
"path": "rootfs",
"readonly": true
},
"hooks": {},
"linux": {
"namespaces": [
{
"type": "mount"
}
],
"resources": {
"devices": [
{
"access": "rwm",
"allow": true
}
]
},
"rootfsPropagation": "private"
},
"mounts": [
{
"destination": "/tmp",
"options": [
"private",
"bind",
"rw",
"mode=755"
],
"source": "/tmp",
"type": "bind"
},
{
"destination": "/etc",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/etc",
"type": "bind"
},
{
"destination": "/lib/modules",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/lib/modules",
"type": "bind"
},
{
"destination": "/root",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/root",
"type": "bind"
},
{
"destination": "/home",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/home",
"type": "bind"
},
{
"destination": "/mnt",
"options": [
"rbind",
"rw",
"rprivate",
"mode=755"
],
"source": "/mnt",
"type": "bind"
},
{
"type": "bind",
"source": "${RUN_DIRECTORY}",
"destination": "/run",
"options": [
"rshared",
"rbind",
"rw",
"mode=755"
]
},
{
"type": "bind",
"source": "${RUN_DIRECTORY}/systemd",
"destination": "/run/systemd",
"options": [
"rslave",
"bind",
"rw",
"mode=755"
]
},
{
"destination": "/var/log",
"options": [
"rbind",
"rslave",
"rw"
],
"source": "/var/log",
"type": "bind"
},
{
"destination": "/var/lib",
"options": [
"rbind",
"rprivate",
"rw"
],
"source": "${STATE_DIRECTORY}",
"type": "bind"
},
{
"destination": "/var/lib/containers/storage",
"options": [
"rbind",
"rshared",
"rw"
],
"source": "${VAR_LIB_CONTAINERS_STORAGE}",
"type": "bind"
},
{
"destination": "/var/lib/origin",
"options": [
"rshared",
"bind",
"rw"
],
"source": "${VAR_LIB_ORIGIN}",
"type": "bind"
},
{
"destination": "/var/lib/kubelet",
"options": [
"rshared",
"bind",
"rw"
],
"source": "${VAR_LIB_KUBE}",
"type": "bind"
},
{
"destination": "/opt/cni",
"options": [
"rbind",
"rprivate",
"ro",
"mode=755"
],
"source": "${OPT_CNI}",
"type": "bind"
},
{
"destination": "/dev",
"options": [
"rprivate",
"rbind",
"rw",
"mode=755"
],
"source": "/dev",
"type": "bind"
},
{
"destination": "/sys",
"options": [
"rprivate",
"rbind",
"rw",
"mode=755"
],
"source": "/sys",
"type": "bind"
},
{
"destination": "/proc",
"options": [
"rbind",
"rw",
"mode=755"
],
"source": "/proc",
"type": "proc"
}
]
}

View file

@ -1,10 +0,0 @@
{
"version": "1.0",
"defaultValues": {
"LOG_LEVEL" : "info",
"OPT_CNI" : "/opt/cni",
"VAR_LIB_CONTAINERS_STORAGE" : "/var/lib/containers/storage",
"VAR_LIB_ORIGIN" : "/var/lib/origin",
"VAR_LIB_KUBE" : "/var/lib/kubelet"
}
}
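These defaultValues can be overridden at install time. A hedged sketch using the atomic CLI's `--set` option (the option is assumed to be available in the atomic version in use, and the storage path below is purely illustrative):

```
# atomic install --system --system-package=no --name=crio \
    --set VAR_LIB_CONTAINERS_STORAGE=/srv/containers/storage \
    --set LOG_LEVEL=debug \
    $REGISTRY/crio
```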

View file

@ -1,11 +0,0 @@
#!/bin/sh
# Ensure that new processes maintain this SELinux label
PID=$$
LABEL=`tr -d '\000' < /proc/$PID/attr/current`
printf %s $LABEL > /proc/self/attr/exec
test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage
test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network
exec /usr/bin/crio --log-level=$LOG_LEVEL

View file

@ -1,20 +0,0 @@
[Unit]
Description=crio daemon
After=network.target
[Service]
Type=notify
ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh
ExecStart=$EXEC_START
ExecStop=$EXEC_STOP
Restart=on-failure
WorkingDirectory=$DESTDIR
RuntimeDirectory=${NAME}
TasksMax=infinity
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
TimeoutStartSec=0
[Install]
WantedBy=multi-user.target

View file

@ -1,7 +0,0 @@
#!/bin/sh
findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage
findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin
findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet
mount --make-shared /run
findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd

View file

@ -1,5 +0,0 @@
d ${RUN_DIRECTORY}/crio - - - - -
d /etc/crio - - - - -
Z /etc/crio - - - - -
d ${STATE_DIRECTORY}/origin - - - - -
d ${STATE_DIRECTORY}/kubelet - - - - -

View file

@ -1,30 +0,0 @@
FROM registry.fedoraproject.org/fedora:27
ENV VERSION=0 RELEASE=1 ARCH=x86_64
LABEL com.redhat.component="cri-o" \
name="$FGC/cri-o" \
version="$VERSION" \
release="$RELEASE.$DISTTAG" \
architecture="$ARCH" \
usage="atomic install --system --system-package=no crio && systemctl start crio" \
summary="The cri-o daemon as a system container." \
maintainer="Yu Qi Zhang <jzehrarnyg@gmail.com>" \
atomic.type="system"
COPY README.md /
RUN dnf install --enablerepo=updates-testing --setopt=tsflags=nodocs -y iptables cri-o socat iproute runc && \
rpm -V iptables cri-o iproute runc && \
dnf clean all && \
mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \
cp /etc/crio/* /exports/hostfs/etc/crio && \
if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi
RUN sed -i '/storage_option =/s/.*/&\n"overlay.override_kernel_check=1",/' /exports/hostfs/etc/crio/crio.conf
COPY manifest.json tmpfiles.template config.json.template service.template /exports/
COPY set_mounts.sh /
COPY run.sh /usr/bin/
CMD ["/usr/bin/run.sh"]

View file

@ -1,53 +0,0 @@
# cri-o
This is the cri-o daemon as a system container.
## Building the image from source:
```
# git clone https://github.com/projectatomic/atomic-system-containers
# cd atomic-system-containers/cri-o
# docker build -t crio .
```
## Running the system container with the atomic CLI:
Pull from registry into ostree:
```
# atomic pull --storage ostree $REGISTRY/crio
```
Alternatively, pull from local docker:
```
# atomic pull --storage ostree docker:crio:latest
```
Install the container:
Currently we recommend using --system-package=no to avoid having rpmbuild create an rpm file
during installation. This flag will tell the atomic CLI to fall back to copying files to the
host instead.
```
# atomic install --system --system-package=no --name=crio ($REGISTRY)/crio
```
Start as a systemd service:
```
# systemctl start crio
```
Stop the service:
```
# systemctl stop crio
```
Remove the container:
```
# atomic uninstall crio
```

View file

@ -1,432 +0,0 @@
{
"ociVersion": "1.0.0",
"platform": {
"arch": "amd64",
"os": "linux"
},
"process": {
"args": [
"/usr/bin/run.sh"
],
"selinuxLabel": "system_u:system_r:container_runtime_t:s0",
"capabilities": {
"ambient": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND",
"CAP_AUDIT_READ"
],
"bounding": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND",
"CAP_AUDIT_READ"
],
"effective": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND",
"CAP_AUDIT_READ"
],
"inheritable": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND",
"CAP_AUDIT_READ"
],
"permitted": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND",
"CAP_AUDIT_READ"
]
},
"cwd": "/",
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin",
"TERM=xterm",
"LOG_LEVEL=$LOG_LEVEL",
"NAME=$NAME"
],
"noNewPrivileges": false,
"terminal": false,
"user": {
"gid": 0,
"uid": 0
}
},
"root": {
"path": "rootfs",
"readonly": true
},
"hooks": {},
"linux": {
"namespaces": [
{
"type": "mount"
}
],
"resources": {
"devices": [
{
"access": "rwm",
"allow": true
}
]
},
"rootfsPropagation": "private"
},
"mounts": [
{
"destination": "/tmp",
"options": [
"private",
"bind",
"rw",
"mode=755"
],
"source": "/tmp",
"type": "bind"
},
{
"destination": "/etc",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/etc",
"type": "bind"
},
{
"destination": "/lib/modules",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/lib/modules",
"type": "bind"
},
{
"destination": "/root",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/root",
"type": "bind"
},
{
"destination": "/home",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/home",
"type": "bind"
},
{
"destination": "/mnt",
"options": [
"rbind",
"rw",
"rprivate",
"mode=755"
],
"source": "/mnt",
"type": "bind"
},
{
"type": "bind",
"source": "${RUN_DIRECTORY}",
"destination": "/run",
"options": [
"rshared",
"rbind",
"rw",
"mode=755"
]
},
{
"type": "bind",
"source": "${RUN_DIRECTORY}/systemd",
"destination": "/run/systemd",
"options": [
"rslave",
"bind",
"rw",
"mode=755"
]
},
{
"destination": "/var/log",
"options": [
"rbind",
"rslave",
"rw"
],
"source": "/var/log",
"type": "bind"
},
{
"destination": "/var/lib",
"options": [
"rbind",
"rprivate",
"rw"
],
"source": "${STATE_DIRECTORY}",
"type": "bind"
},
{
"destination": "/var/lib/containers/storage",
"options": [
"rbind",
"rshared",
"rw"
],
"source": "${VAR_LIB_CONTAINERS_STORAGE}",
"type": "bind"
},
{
"destination": "/var/lib/origin",
"options": [
"rshared",
"bind",
"rw"
],
"source": "${VAR_LIB_ORIGIN}",
"type": "bind"
},
{
"destination": "/var/lib/kubelet",
"options": [
"rshared",
"bind",
"rw"
],
"source": "${VAR_LIB_KUBE}",
"type": "bind"
},
{
"destination": "/opt/cni",
"options": [
"rbind",
"rprivate",
"ro",
"mode=755"
],
"source": "${OPT_CNI}",
"type": "bind"
},
{
"destination": "/dev",
"options": [
"rprivate",
"rbind",
"rw",
"mode=755"
],
"source": "/dev",
"type": "bind"
},
{
"destination": "/sys",
"options": [
"rprivate",
"rbind",
"rw",
"mode=755"
],
"source": "/sys",
"type": "bind"
},
{
"destination": "/proc",
"options": [
"rbind",
"rw",
"mode=755"
],
"source": "/proc",
"type": "proc"
}
]
}

View file

@ -1,10 +0,0 @@
{
"version": "1.0",
"defaultValues": {
"LOG_LEVEL" : "info",
"OPT_CNI" : "/opt/cni",
"VAR_LIB_CONTAINERS_STORAGE" : "/var/lib/containers/storage",
"VAR_LIB_ORIGIN" : "/var/lib/origin",
"VAR_LIB_KUBE" : "/var/lib/kubelet"
}
}

View file

@ -1,11 +0,0 @@
#!/bin/sh
# Ensure that new processes maintain this SELinux label
PID=$$
LABEL=`tr -d '\000' < /proc/$PID/attr/current`
printf %s $LABEL > /proc/self/attr/exec
test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage
test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network
exec /usr/bin/crio --log-level=$LOG_LEVEL

View file

@ -1,20 +0,0 @@
[Unit]
Description=crio daemon
After=network.target
[Service]
Type=notify
ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh
ExecStart=$EXEC_START
ExecStop=$EXEC_STOP
Restart=on-failure
WorkingDirectory=$DESTDIR
RuntimeDirectory=${NAME}
TasksMax=infinity
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
TimeoutStartSec=0
[Install]
WantedBy=multi-user.target

View file

@ -1,7 +0,0 @@
#!/bin/sh
findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage
findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin
findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet
mount --make-shared /run
findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd

View file

@ -1,5 +0,0 @@
d ${RUN_DIRECTORY}/crio - - - - -
d /etc/crio - - - - -
Z /etc/crio - - - - -
d ${STATE_DIRECTORY}/origin - - - - -
d ${STATE_DIRECTORY}/kubelet - - - - -

View file

@ -1,41 +0,0 @@
#oit## This file is managed by the OpenShift Image Tool
#oit## by the OpenShift Continuous Delivery team.
#oit##
#oit## Any yum repos listed in this file will effectively be ignored during CD builds.
#oit## Yum repos must be enabled in the oit configuration files.
#oit## Some aspects of this file may be managed programmatically. For example, the image name, labels (version,
#oit## release, and other), and the base FROM. Changes made directly in distgit may be lost during the next
#oit## reconciliation.
#oit##
FROM rhel7:7-released
RUN \
yum install --setopt=tsflags=nodocs -y socat iptables cri-o iproute runc skopeo-containers container-selinux && \
rpm -V socat iptables cri-o iproute runc skopeo-containers container-selinux && \
yum clean all && \
mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \
cp /etc/crio/* /exports/hostfs/etc/crio && \
if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi
COPY manifest.json tmpfiles.template config.json.template service.template /exports/
COPY set_mounts.sh /
COPY run.sh /usr/bin/
CMD ["/usr/bin/run.sh"]
LABEL \
com.redhat.component="cri-o-docker" \
io.k8s.description="CRI-O is an implementation of the Kubernetes CRI. It is a lightweight, OCI-compliant runtime that is native to kubernetes. CRI-O supports OCI container images and can pull from any container registry." \
maintainer="Jhon Honce <jhonce@redhat.com>" \
name="openshift3/cri-o" \
License="GPLv2+" \
io.k8s.display-name="CRI-O" \
summary="OCI-based implementation of Kubernetes Container Runtime Interface" \
release="0.13.0.0" \
version="v3.8.0" \
architecture="x86_64" \
usage="atomic install --system --system-package=no crio && systemctl start crio" \
vendor="Red Hat" \
io.openshift.tags="cri-o system rhel7" \
atomic.type="system"

View file

@ -1,422 +0,0 @@
{
"ociVersion": "1.0.0",
"platform": {
"arch": "amd64",
"os": "linux"
},
"process": {
"args": [
"/usr/bin/run.sh"
],
"capabilities": {
"ambient": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND"
],
"bounding": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND"
],
"effective": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND"
],
"inheritable": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND"
],
"permitted": [
"CAP_CHOWN",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_DAC_OVERRIDE",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND"
]
},
"selinuxLabel": "system_u:system_r:container_runtime_t:s0",
"cwd": "/",
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin",
"TERM=xterm",
"LOG_LEVEL=$LOG_LEVEL",
"NAME=$NAME"
],
"noNewPrivileges": false,
"terminal": false,
"user": {
"gid": 0,
"uid": 0
}
},
"root": {
"path": "rootfs",
"readonly": true
},
"hooks": {},
"linux": {
"namespaces": [{
"type": "mount"
}],
"resources": {
"devices": [{
"access": "rwm",
"allow": true
}]
},
"rootfsPropagation": "private"
},
"mounts": [{
"destination": "/tmp",
"options": [
"private",
"bind",
"rw",
"mode=755"
],
"source": "/tmp",
"type": "bind"
},
{
"destination": "/etc",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/etc",
"type": "bind"
},
{
"destination": "/lib/modules",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/lib/modules",
"type": "bind"
},
{
"destination": "/root",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/root",
"type": "bind"
},
{
"destination": "/home",
"options": [
"rbind",
"rprivate",
"rw",
"mode=755"
],
"source": "/home",
"type": "bind"
},
{
"destination": "/mnt",
"options": [
"rbind",
"rw",
"rprivate",
"mode=755"
],
"source": "/mnt",
"type": "bind"
},
{
"type": "bind",
"source": "${RUN_DIRECTORY}",
"destination": "/run",
"options": [
"rshared",
"rbind",
"rw",
"mode=755"
]
},
{
"type": "bind",
"source": "${RUN_DIRECTORY}/systemd",
"destination": "/run/systemd",
"options": [
"rslave",
"bind",
"rw",
"mode=755"
]
},
{
"destination": "/var/log",
"options": [
"rbind",
"rslave",
"rw"
],
"source": "/var/log",
"type": "bind"
},
{
"destination": "/var/lib",
"options": [
"rbind",
"rprivate",
"rw"
],
"source": "${STATE_DIRECTORY}",
"type": "bind"
},
{
"destination": "/var/lib/containers/storage",
"options": [
"rbind",
"rshared",
"rw"
],
"source": "${VAR_LIB_CONTAINERS_STORAGE}",
"type": "bind"
},
{
"destination": "/var/lib/origin",
"options": [
"rshared",
"bind",
"rw"
],
"source": "${VAR_LIB_ORIGIN}",
"type": "bind"
},
{
"destination": "/var/lib/kubelet",
"options": [
"rshared",
"bind",
"rw"
],
"source": "${VAR_LIB_KUBE}",
"type": "bind"
},
{
"destination": "/opt/cni",
"options": [
"rbind",
"rprivate",
"ro",
"mode=755"
],
"source": "${OPT_CNI}",
"type": "bind"
},
{
"destination": "/dev",
"options": [
"rprivate",
"rbind",
"rw",
"mode=755"
],
"source": "/dev",
"type": "bind"
},
{
"destination": "/sys",
"options": [
"rprivate",
"rbind",
"rw",
"mode=755"
],
"source": "/sys",
"type": "bind"
},
{
"destination": "/proc",
"options": [
"rbind",
"rw",
"mode=755"
],
"source": "/proc",
"type": "proc"
}
]
}

View file

@ -1,37 +0,0 @@
% CRI-O (1) Container Image Pages
% Jhon Honce
% September 7, 2017
# NAME
cri-o - OCI-based implementation of Kubernetes Container Runtime Interface
# DESCRIPTION
CRI-O is an implementation of the Kubernetes CRI. It is a lightweight, OCI-compliant runtime that is native to Kubernetes. CRI-O supports OCI container images and can pull from any container registry.
You can find more information on the CRI-O project at <https://github.com/kubernetes-incubator/cri-o/>
# USAGE
Pull from local docker and install system container:
```
# atomic pull --storage ostree docker:openshift3/cri-o:latest
# atomic install --system --system-package=no --name cri-o openshift3/cri-o
```
Start and enable as a systemd service:
```
# systemctl enable --now cri-o
```
Stop the service:
```
# systemctl stop cri-o
```
Remove the container:
```
# atomic uninstall cri-o
```
# SEE ALSO
man systemd(1)

View file

@ -1,10 +0,0 @@
{
"version": "1.0",
"defaultValues": {
"LOG_LEVEL": "info",
"OPT_CNI": "/opt/cni",
"VAR_LIB_CONTAINERS_STORAGE": "/var/lib/containers/storage",
"VAR_LIB_ORIGIN": "/var/lib/origin",
"VAR_LIB_KUBE": "/var/lib/kubelet"
}
}

View file

@ -1,11 +0,0 @@
#!/bin/sh
# Ensure that new processes maintain this SELinux label
PID=$$
LABEL=`tr -d '\000' < /proc/$PID/attr/current`
printf %s $LABEL > /proc/self/attr/exec
test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage
test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network
exec /usr/bin/crio --log-level=$LOG_LEVEL

View file

@ -1,20 +0,0 @@
[Unit]
Description=crio daemon
After=network.target
[Service]
Type=notify
ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh
ExecStart=$EXEC_START
ExecStop=$EXEC_STOP
Restart=on-failure
WorkingDirectory=$DESTDIR
RuntimeDirectory=${NAME}
TasksMax=infinity
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
TimeoutStartSec=0
[Install]
WantedBy=multi-user.target

View file

@ -1,7 +0,0 @@
#!/bin/sh
findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage
findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin
findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet
mount --make-shared /run
findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd

View file

@ -1,5 +0,0 @@
d ${RUN_DIRECTORY}/crio - - - - -
d /etc/crio - - - - -
Z /etc/crio - - - - -
d ${STATE_DIRECTORY}/origin - - - - -
d ${STATE_DIRECTORY}/kubelet - - - - -

View file

@ -1,14 +0,0 @@
[Unit]
Description=Shutdown CRIO containers before shutting down the system
Wants=crio.service
After=crio.service
Documentation=man:crio(8)
[Service]
Type=oneshot
ExecStart=/usr/bin/true
ExecStop=mkdir -p /var/lib/crio; touch /var/lib/crio/crio.shutdown
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target

View file

@ -1,24 +0,0 @@
[Unit]
Description=Open Container Initiative Daemon
Documentation=https://github.com/kubernetes-incubator/cri-o
After=network-online.target
[Service]
Type=notify
EnvironmentFile=-/etc/sysconfig/crio-storage
EnvironmentFile=-/etc/sysconfig/crio-network
Environment=GOTRACEBACK=crash
ExecStart=/usr/local/bin/crio \
$CRIO_STORAGE_OPTIONS \
$CRIO_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
TasksMax=infinity
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
OOMScoreAdjust=-999
TimeoutStartSec=0
Restart=on-abnormal
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,14 @@
[Unit]
Description=Shutdown OCID containers before shutting down the system
Wants=ocid.service
After=ocid.service
Documentation=man:ocid(8)
[Service]
Type=oneshot
ExecStart=/usr/bin/true
ExecStop=mkdir -p /var/lib/ocid; touch /var/lib/ocid/ocid.shutdown
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,23 @@
[Unit]
Description=Open Container Initiative Daemon
Documentation=https://github.com/kubernetes-incubator/cri-o
After=network.target
[Service]
Type=notify
EnvironmentFile=-/etc/sysconfig/ocid-storage
EnvironmentFile=-/etc/sysconfig/ocid-network
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/ocid \
$OCID_STORAGE_OPTIONS \
$OCID_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
TasksMax=8192
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
TimeoutStartSec=0
Restart=on-abnormal
[Install]
WantedBy=multi-user.target

View file

@ -1,21 +0,0 @@
# Fedora and RHEL Integration and End-to-End Tests
This directory contains playbooks to set up for and run the integration and
end-to-end tests for CRI-O on RHEL and Fedora hosts. Two entrypoints exist:
- `main.yml`: sets up the machine and runs tests
- `results.yml`: gathers test output to `/tmp/artifacts`
When running `main.yml`, three tags are present:
- `setup`: run all tasks to set up the system for testing
- `e2e`: build CRI-O from source and run Kubernetes node E2Es
- `integration`: build CRI-O from source and run the local integration suite
The playbooks assume the following things about your system:
- on RHEL, the server and extras repos are configured and certs are present
- `ansible` is installed and the host is bootstrapped to allow `ansible` to run against it
- the `$GOPATH` is set and present for all shells (*e.g.* written in `/etc/environment`)
- CRI-O is checked out to the correct state at `${GOPATH}/src/github.com/kubernetes-incubator/cri-o`
- the user running the playbook has access to passwordless `sudo`
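A minimal sketch of invoking these entrypoints; only the playbook names, tags, and artifact path come from this README, while the inventory file name is an illustrative assumption:

```
# set up the host and run only the integration suite
ansible-playbook main.yml -i hosts --tags "setup,integration"
# afterwards, gather logs and test output into /tmp/artifacts
ansible-playbook results.yml -i hosts
```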

View file

@ -1,359 +0,0 @@
# config file for ansible -- http://ansible.com/
# ==============================================
# nearly all parameters can be overridden in ansible-playbook
# or with command line flags. ansible will read ANSIBLE_CONFIG,
# ansible.cfg in the current working directory, .ansible.cfg in
# the home directory or /etc/ansible/ansible.cfg, whichever it
# finds first
[defaults]
# some basic default values...
#inventory = inventory
#library = /usr/share/my_modules/
#remote_tmp = $HOME/.ansible/tmp
#local_tmp = .ansible/tmp
#forks = 5
forks = 10
#poll_interval = 15
#sudo_user = root
#ask_sudo_pass = True
ask_sudo_pass = False
#ask_pass = True
ask_pass = False
#transport = smart
#remote_port = 22
#module_lang = C
#module_set_locale = True
# plays will gather facts by default, which contain information about
# the remote system.
#
# smart - gather by default, but don't regather if already gathered
# implicit - gather by default, turn off with gather_facts: False
# explicit - do not gather by default, must say gather_facts: True
#gathering = implicit
gathering = smart
# by default retrieve all facts subsets
# all - gather all subsets
# network - gather min and network facts
# hardware - gather hardware facts (longest facts to retrieve)
# virtual - gather min and virtual facts
# facter - import facts from facter
# ohai - import facts from ohai
# You can combine them using comma (ex: network,virtual)
# You can negate them using ! (ex: !hardware,!facter,!ohai)
# A minimal set of facts is always gathered.
gather_subset = network
# additional paths to search for roles in, colon separated
# N/B: This depends on how ansible is called
#roles_path = $WORKSPACE/kommandir_workspace/roles
# uncomment this to disable SSH key host checking
#host_key_checking = False
host_key_checking = False
# change the default callback
#stdout_callback = skippy
# enable additional callbacks
#callback_whitelist = timer, mail
# Determine whether includes in tasks and handlers are "static" by
# default. As of 2.0, includes are dynamic by default. Setting these
# values to True will make includes behave more like they did in the
# 1.x versions.
task_includes_static = True
handler_includes_static = True
# change this for alternative sudo implementations
#sudo_exe = sudo
# What flags to pass to sudo
# WARNING: leaving out the defaults might create unexpected behaviours
#sudo_flags = -H -S -n
# SSH timeout
#timeout = 10
# default user to use for playbooks if user is not specified
# (/usr/bin/ansible will use current user as default)
#remote_user = root
remote_user = root
# logging is off by default unless this path is defined
# if so defined, consider logrotate
log_path = $ARTIFACTS/main.log
# default module name for /usr/bin/ansible
#module_name = command
# use this shell for commands executed under sudo
# you may need to change this to bin/bash in rare instances
# if sudo is constrained
# executable = /bin/sh
# if inventory variables overlap, does the higher precedence one win
# or are hash values merged together? The default is 'replace' but
# this can also be set to 'merge'.
hash_behaviour = replace
# by default, variables from roles will be visible in the global variable
# scope. To prevent this, the following option can be enabled, and only
# tasks and handlers within the role will see the variables there
private_role_vars = False
# list any Jinja2 extensions to enable here:
#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
# if set, always use this private key file for authentication, same as
# if passing --private-key to ansible or ansible-playbook
#private_key_file = /path/to/file
# If set, configures the path to the Vault password file as an alternative to
# specifying --vault-password-file on the command line.
#vault_password_file = /path/to/vault_password_file
# format of string {{ ansible_managed }} available within Jinja2
# templates indicates to users editing templates files will be replaced.
# replacing {file}, {host} and {uid} and strftime codes with proper values.
#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
# This short version is better used in templates as it won't flag the file as changed every run.
#ansible_managed = Ansible managed: {file} on {host}
# by default, ansible-playbook will display "Skipping [host]" if it determines a task
# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
# messages. NOTE: the task header will still be shown regardless of whether or not the
# task is skipped.
#display_skipped_hosts = True
display_skipped_hosts = False
# by default, if a task in a playbook does not include a name: field then
# ansible-playbook will construct a header that includes the task's action but
# not the task's args. This is a security feature because ansible cannot know
# if the *module* considers an argument to be no_log at the time that the
# header is printed. If your environment doesn't have a problem securing
# stdout from ansible-playbook (or you have manually specified no_log in your
# playbook on all of the tasks where you have secret information) then you can
# safely set this to True to get more informative messages.
display_args_to_stdout = False
# by default (as of 1.3), Ansible will raise errors when attempting to dereference
# Jinja2 variables that are not set in templates or action lines. Uncomment this line
# to revert the behavior to pre-1.3.
#error_on_undefined_vars = False
# by default (as of 1.6), Ansible may display warnings based on the configuration of the
# system running ansible itself. This may include warnings about 3rd party packages or
# other conditions that should be resolved if possible.
# to disable these warnings, set the following value to False:
system_warnings = False
# by default (as of 1.4), Ansible may display deprecation warnings for language
# features that should no longer be used and will be removed in future versions.
# to disable these warnings, set the following value to False:
deprecation_warnings = False
# (as of 1.8), Ansible can optionally warn when usage of the shell and
# command module appear to be simplified by using a default Ansible module
# instead. These warnings can be silenced by adjusting the following
# setting or adding warn=yes or warn=no to the end of the command line
# parameter string. This will for example suggest using the git module
# instead of shelling out to the git command.
command_warnings = False
# set plugin path directories here, separate with colons
#action_plugins = /usr/share/ansible/plugins/action
#callback_plugins = /usr/share/ansible/plugins/callback
#connection_plugins = /usr/share/ansible/plugins/connection
#lookup_plugins = /usr/share/ansible/plugins/lookup
#vars_plugins = /usr/share/ansible/plugins/vars
#filter_plugins = /usr/share/ansible/plugins/filter
#test_plugins = /usr/share/ansible/plugins/test
#strategy_plugins = /usr/share/ansible/plugins/strategy
# Most callbacks shipped with Ansible are disabled by default
# and need to be whitelisted in your ansible.cfg file in order to function.
callback_whitelist = default
# by default callbacks are not loaded for /bin/ansible, enable this if you
# want, for example, a notification or logging callback to also apply to
# /bin/ansible runs
#bin_ansible_callbacks = False
# don't like cows? that's unfortunate.
# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
#nocows = 1
# set which cowsay stencil you'd like to use by default. When set to 'random',
# a random stencil will be selected for each task. The selection will be filtered
# against the `cow_whitelist` option below.
#cow_selection = default
#cow_selection = random
# when using the 'random' option for cowsay, stencils will be restricted to this list.
# it should be formatted as a comma-separated list with no spaces between names.
# NOTE: line continuations here are for formatting purposes only, as the INI parser
# in python does not support them.
#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
# hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
# stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www
# don't like colors either?
# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
nocolor = 0
# if set to a persistent type (not 'memory', for example 'redis') fact values
# from previous runs in Ansible will be stored. This may be useful when
# wanting to use, for example, IP information from one group of servers
# without having to talk to them in the same playbook run to get their
# current IP information.
#fact_caching = memory
# retry files
# When a playbook fails by default a .retry file will be created in ~/
# You can disable this feature by setting retry_files_enabled to False
# and you can change the location of the files by setting retry_files_save_path
#retry_files_enabled = False
retry_files_enabled = False
# squash actions
# Ansible can optimise actions that call modules with list parameters
# when looping. Instead of calling the module once per with_ item, the
# module is called once with all items at once. Currently this only works
# under limited circumstances, and only with parameters named 'name'.
squash_actions = apk,apt,dnf,package,pacman,pkgng,yum,zypper
# prevents logging of task data, off by default
#no_log = False
# prevents logging of tasks, but only on the targets, data is still logged on the master/controller
no_target_syslog = True
# controls whether Ansible will raise an error or warning if a task has no
# choice but to create world readable temporary files to execute a module on
# the remote machine. This option is False by default for security. Users may
# turn this on to have behaviour more like Ansible prior to 2.1.x. See
# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
# for more secure ways to fix this than enabling this option.
#allow_world_readable_tmpfiles = False
# controls the compression level of variables sent to
# worker processes. At the default of 0, no compression
# is used. This value must be an integer from 0 to 9.
#var_compression_level = 9
# controls what compression method is used for new-style ansible modules when
# they are sent to the remote system. The compression types depend on having
# support compiled into both the controller's python and the client's python.
# The names should match with the python Zipfile compression types:
# * ZIP_STORED (no compression. available everywhere)
# * ZIP_DEFLATED (uses zlib, the default)
# These values may be set per host via the ansible_module_compression inventory
# variable
#module_compression = 'ZIP_DEFLATED'
# This controls the cutoff point (in bytes) on --diff for files
# set to 0 for unlimited (RAM may suffer!).
#max_diff_size = 1048576
[privilege_escalation]
#become=True
#become_method=sudo
#become_user=root
become_user=root
#become_ask_pass=False
[paramiko_connection]
# uncomment this line to cause the paramiko connection plugin to not record new host
# keys encountered. Increases performance on new host additions. Setting works independently of the
# host key checking setting above.
#record_host_keys=False
# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
# line to disable this behaviour.
#pty=False
[ssh_connection]
# ssh arguments to use
# Leaving off ControlPersist will result in poor performance, so use
# paramiko on older platforms rather than removing it
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o PreferredAuthentications=publickey -o ConnectTimeout=13
# The path to use for the ControlPath sockets. This defaults to
# "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with
# very long hostnames or very long path names (caused by long user names or
# deeply nested home directories) this can exceed the character limit on
# file socket names (108 characters for most platforms). In that case, you
# may wish to shorten the string below.
#
# Example:
# control_path = %(directory)s/%%h-%%r
#control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r
# Enabling pipelining reduces the number of SSH operations required to
# execute a module on the remote server. This can result in a significant
# performance improvement when enabled, however when using "sudo:" you must
# first disable 'requiretty' in /etc/sudoers
#
# By default, this option is disabled to preserve compatibility with
# sudoers configurations that have requiretty (the default on many distros).
#
#pipelining = False
pipelining=True
# if True, make ansible use scp if the connection type is ssh
# (default is sftp)
#scp_if_ssh = True
# if False, sftp will not use batch mode to transfer files. This may cause some
# types of file transfer failures impossible to catch however, and should
# only be disabled if your sftp version has problems with batch mode
#sftp_batch_mode = False
[accelerate]
#accelerate_port = 5099
#accelerate_timeout = 30
#accelerate_connect_timeout = 5.0
# The daemon timeout is measured in minutes. This time is measured
# from the last activity to the accelerate daemon.
#accelerate_daemon_timeout = 30
# If set to yes, accelerate_multi_key will allow multiple
# private keys to be uploaded to it, though each user must
# have access to the system via SSH to add a new key. The default
# is "no".
#accelerate_multi_key = yes
[selinux]
# file systems that require special treatment when dealing with security context
# the default behaviour that copies the existing context or uses the user default
# needs to be changed to use the file system dependent context.
#special_context_filesystems=nfs,vboxsf,fuse,ramfs
# Set this to yes to allow libvirt_lxc connections to work without SELinux.
#libvirt_lxc_noseclabel = yes
[colors]
#highlight = white
#verbose = blue
#warn = bright purple
#error = red
#debug = dark gray
#deprecate = purple
#skip = cyan
#unreachable = red
#ok = green
#changed = yellow
#diff_add = green
#diff_remove = red
#diff_lines = cyan

View file

@ -1,17 +0,0 @@
---
- name: clone bats source repo
git:
repo: "https://github.com/sstephenson/bats.git"
dest: "{{ ansible_env.GOPATH }}/src/github.com/sstephenson/bats"
- name: install bats
command: "./install.sh /usr/local"
args:
chdir: "{{ ansible_env.GOPATH }}/src/github.com/sstephenson/bats"
- name: link bats
file:
src: /usr/local/bin/bats
dest: /usr/bin/bats
state: link

View file

@ -1,79 +0,0 @@
---
- name: stat the expected cri-o directory
stat:
path: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
register: dir_stat
- name: expect cri-o to be cloned already
fail:
msg: "Expected cri-o to be cloned at {{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o but it wasn't!"
when: not dir_stat.stat.exists
- name: install cri-o tools
make:
target: install.tools
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
- name: build cri-o
make:
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
- name: install cri-o
make:
target: install
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
- name: install cri-o systemd files
make:
target: install.systemd
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
- name: install cri-o config
make:
target: install.config
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
- name: install configs
copy:
src: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o/{{ item.src }}"
dest: "{{ item.dest }}"
remote_src: yes
with_items:
- src: contrib/cni/10-crio-bridge.conf
dest: /etc/cni/net.d/10-crio-bridge.conf
- src: contrib/cni/99-loopback.conf
dest: /etc/cni/net.d/99-loopback.conf
- src: test/redhat_sigstore.yaml
dest: /etc/containers/registries.d/registry.access.redhat.com.yaml
- name: run with overlay
replace:
regexp: 'storage_driver = ""'
replace: 'storage_driver = "overlay"'
name: /etc/crio/crio.conf
backup: yes
- name: run with systemd cgroup manager
replace:
regexp: 'cgroup_manager = "cgroupfs"'
replace: 'cgroup_manager = "systemd"'
name: /etc/crio/crio.conf
backup: yes
- name: add docker.io default registry
lineinfile:
dest: /etc/crio/crio.conf
line: '"docker.io"'
insertafter: 'registries = \['
regexp: 'docker\.io'
state: present
- name: add overlay storage opts on RHEL/CentOS
lineinfile:
dest: /etc/crio/crio.conf
line: '"overlay.override_kernel_check=1"'
insertafter: 'storage_option = \['
regexp: 'overlay\.override_kernel_check=1'
state: present
when: ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS'

View file

@ -1,26 +0,0 @@
---
- name: clone cri-tools source repo
git:
repo: "https://github.com/kubernetes-incubator/cri-tools.git"
dest: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-tools"
version: "{{ cri_tools_git_version }}"
force: "{{ force_clone | default(False) | bool}}"
- name: install crictl
command: "/usr/bin/go install github.com/kubernetes-incubator/cri-tools/cmd/crictl"
- name: install critest
command: "/usr/bin/go install github.com/kubernetes-incubator/cri-tools/cmd/critest"
- name: link crictl
file:
src: "{{ ansible_env.GOPATH }}/bin/crictl"
dest: /usr/bin/crictl
state: link
- name: link critest
file:
src: "{{ ansible_env.GOPATH }}/bin/critest"
dest: /usr/bin/critest
state: link

View file

@ -1,67 +0,0 @@
---
- name: clone kubernetes source repo
git:
repo: "https://github.com/{{ k8s_github_fork }}/kubernetes.git"
dest: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
# based on kube v1.9.0-alpha.2, update as needed
version: "{{ k8s_git_version }}"
force: "{{ force_clone | default(False) | bool}}"
- name: install etcd
command: "hack/install-etcd.sh"
args:
chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
- name: build kubernetes
make:
chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
- name: Add custom cluster service file for the e2e testing
copy:
dest: /etc/systemd/system/customcluster.service
content: |
[Unit]
After=network-online.target
Wants=network-online.target
[Service]
WorkingDirectory={{ ansible_env.GOPATH }}/src/k8s.io/kubernetes
ExecStart=/usr/local/bin/createcluster.sh
User=root
[Install]
WantedBy=multi-user.target
- name: Add create cluster background script for e2e testing
copy:
dest: /usr/local/bin/createcluster.sh
content: |
#!/bin/bash
export PATH=/usr/local/go/bin:/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/root/bin:{{ ansible_env.GOPATH }}/bin:{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/third_party/etcd:{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/_output/local/bin/linux/amd64/
export CONTAINER_RUNTIME=remote
export CGROUP_DRIVER=systemd
export CONTAINER_RUNTIME_ENDPOINT='{{ crio_socket }} --runtime-request-timeout=5m'
export ALLOW_SECURITY_CONTEXT=","
export ALLOW_PRIVILEGED=1
export DNS_SERVER_IP={{ ansible_default_ipv4.address }}
export API_HOST={{ ansible_default_ipv4.address }}
export API_HOST_IP={{ ansible_default_ipv4.address }}
export KUBE_ENABLE_CLUSTER_DNS=true
export ENABLE_HOSTPATH_PROVISIONER=true
export KUBE_ENABLE_CLUSTER_DASHBOARD=true
./hack/local-up-cluster.sh
mode: "u=rwx,g=rwx,o=x"
- name: Set kubernetes_provider to be local
lineinfile:
dest: /etc/environment
line: 'KUBERNETES_PROVIDER=local'
regexp: 'KUBERNETES_PROVIDER='
state: present
- name: Set KUBECONFIG
lineinfile:
dest: /etc/environment
line: 'KUBECONFIG=/var/run/kubernetes/admin.kubeconfig'
regexp: 'KUBECONFIG='
state: present

View file

@ -1,50 +0,0 @@
---
- name: clone plugins source repo
git:
repo: "https://github.com/containernetworking/plugins.git"
dest: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins"
version: "dcf7368eeab15e2affc6256f0bb1e84dd46a34de"
- name: build plugins
command: "./build.sh"
args:
chdir: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins"
- name: install plugins
copy:
src: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins/bin/{{ item }}"
dest: "/opt/cni/bin"
mode: "o=rwx,g=rx,o=rx"
remote_src: yes
with_items:
- bridge
- dhcp
- flannel
- host-local
- ipvlan
- loopback
- macvlan
- ptp
- sample
- tuning
- vlan
- name: clone runcom plugins source repo
git:
repo: "https://github.com/runcom/plugins.git"
dest: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins"
version: "custom-bridge"
force: yes
- name: build plugins
command: "./build.sh"
args:
chdir: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins"
- name: install custom bridge
copy:
src: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins/bin/bridge"
dest: "/opt/cni/bin/bridge-custom"
mode: "o=rwx,g=rx,o=rx"
remote_src: yes

View file

@ -1,23 +0,0 @@
---
- name: clone runc source repo
git:
repo: "https://github.com/opencontainers/runc.git"
dest: "{{ ansible_env.GOPATH }}/src/github.com/opencontainers/runc"
version: "c6e4a1ebeb1a72b529c6f1b6ee2b1ae5b868b14f"
- name: build runc
make:
params: BUILDTAGS="seccomp selinux"
chdir: "{{ ansible_env.GOPATH }}/src/github.com/opencontainers/runc"
- name: install runc
make:
target: "install"
chdir: "{{ ansible_env.GOPATH }}/src/github.com/opencontainers/runc"
- name: link runc
file:
src: /usr/local/sbin/runc
dest: /usr/bin/runc
state: link

View file

@ -1,156 +0,0 @@
'''Plugin to override the default output logic.'''
# upstream: https://gist.github.com/cliffano/9868180
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For some reason this has to be done
import imp
import os
ANSIBLE_PATH = imp.find_module('ansible')[1]
DEFAULT_PATH = os.path.join(ANSIBLE_PATH, 'plugins/callback/default.py')
DEFAULT_MODULE = imp.load_source(
'ansible.plugins.callback.default',
DEFAULT_PATH
)
try:
from ansible.plugins.callback import CallbackBase
BASECLASS = CallbackBase
except ImportError: # < ansible 2.1
BASECLASS = DEFAULT_MODULE.CallbackModule
# constants and color helpers used by the overridden callback methods below
from ansible import constants as C
from ansible.utils.color import colorize, hostcolor
class CallbackModule(DEFAULT_MODULE.CallbackModule): # pylint: disable=too-few-public-methods,no-init
'''
Override for the default callback module.
Render std err/out outside of the rest of the result which it prints with
indentation.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'default'
def __init__(self, *args, **kwargs):
# pylint: disable=non-parent-init-called
BASECLASS.__init__(self, *args, **kwargs)
self.failed_task = []
self.result_file = os.environ.get('AHT_RESULT_FILE')
def _dump_results(self, result):
'''Return the text to output for a result.'''
result['_ansible_verbose_always'] = True
save = {}
for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg']:
if key in result:
save[key] = result.pop(key)
output = BASECLASS._dump_results(self, result) # pylint: disable=protected-access
for key in ['stdout', 'stderr', 'msg']:
if key in save and save[key]:
output += '\n\n%s:\n---\n%s\n---' % (key.upper(), save[key])
for key, value in save.items():
result[key] = value
return output
def v2_runner_on_unreachable(self, result):
self.failed_task = result
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
else:
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
def v2_runner_on_failed(self, result, ignore_errors=False):
if ignore_errors is not True:
# Sets an environment variable on test failures for use in playbooks.
# Handler tasks can conditionalize themselves using this variable
# to run only on failure.
os.environ["AHT_FAILURE"] = "1"
# Save last failure
self.failed_task = result
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if 'exception' in result._result:
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
error = result._result['exception'].strip().split('\n')[-1]
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
self._display.display(msg, color=C.COLOR_ERROR)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if delegated_vars:
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
else:
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_playbook_on_stats(self, stats):
self._display.banner("PLAY RECAP")
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t),
colorize(u'ok', t['ok'], C.COLOR_OK),
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', t['failures'], C.COLOR_ERROR)),
screen_only=True
)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t, False),
colorize(u'ok', t['ok'], None),
colorize(u'changed', t['changed'], None),
colorize(u'unreachable', t['unreachable'], None),
colorize(u'failed', t['failures'], None)),
log_only=True
)
self._display.display("", screen_only=True)
# Save result to file if environment variable exists
if self.result_file is not None:
if self.failed_task:
with open(self.result_file, 'w') as f:
f.write("PLAY: %s\n%s\n%s" % (self._play, \
self.failed_task._task, \
self._dump_results(self.failed_task._result)))
else:
open(self.result_file, 'w').close()

View file

@ -1,45 +0,0 @@
---
- name: enable and start CRI-O
systemd:
name: crio
state: started
enabled: yes
daemon_reload: yes
- name: Flush the iptables
command: iptables -F
- name: Enable localnet routing
command: sysctl -w net.ipv4.conf.all.route_localnet=1
- name: Add masquerade for localhost
command: iptables -t nat -I POSTROUTING -s 127.0.0.1 ! -d 127.0.0.1 -j MASQUERADE
- name: run critest validation
shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock v"
args:
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
async: 5400
poll: 30
when: ansible_distribution not in ['RedHat', 'CentOS']
# XXX: RHEL has an additional test which fails because of selinux but disabling
# it doesn't solve the issue.
# TODO(runcom): enable skipped tests once we fix them (selinux)
# https://bugzilla.redhat.com/show_bug.cgi?id=1414236
# https://access.redhat.com/solutions/2897781
- name: run critest validation
shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock -s 'should not allow privilege escalation when true' v"
args:
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
async: 5400
poll: 30
when: ansible_distribution in ['RedHat', 'CentOS']
- name: run critest benchmarks
shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock b"
args:
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
async: 5400
poll: 30

View file

@ -1,58 +0,0 @@
---
- name: enable and start CRI-O
systemd:
name: crio
state: started
enabled: yes
daemon_reload: yes
- name: update the server address for the custom cluster
lineinfile:
dest: /usr/local/bin/createcluster.sh
line: "export {{ item }}={{ ansible_default_ipv4.address }}"
regexp: "^export {{ item }}="
state: present
with_items:
- DNS_SERVER_IP
- API_HOST
- API_HOST_IP
- name: enable and start the custom cluster
systemd:
name: customcluster.service
state: started
enabled: yes
daemon_reload: yes
- name: wait for the cluster to be running
command: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/_output/bin/kubectl get service kubernetes --namespace default"
register: kube_poll
until: kube_poll | succeeded
retries: 100
delay: 30
- name: ensure directory exists for e2e reports
file:
path: "{{ artifacts }}"
state: directory
# TODO remove the last test skipped once https://github.com/kubernetes-incubator/cri-o/pull/1217 is merged
- name: Buffer the e2e testing command to workaround Ansible YAML folding "feature"
set_fact:
e2e_shell_cmd: >
/usr/bin/go run hack/e2e.go
--test
--test_args="-host=https://{{ ansible_default_ipv4.address }}:6443
--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|PersistentVolumes|\[HPA\]|should.support.building.a.client.with.a.CSR|should.support.inline.execution.and.attach
--report-dir={{ artifacts }}"
&> {{ artifacts }}/e2e.log
# Fix vim syntax highlighting: "
- name: disable SELinux
command: setenforce 0
- name: run e2e tests
shell: "{{ e2e_shell_cmd | regex_replace('\\s+', ' ') }}"
args:
chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"

View file

@ -1,55 +0,0 @@
---
- name: ensure Golang dir is empty first
file:
path: /usr/local/go
state: absent
- name: fetch Golang
unarchive:
remote_src: yes
src: "https://storage.googleapis.com/golang/go{{ version }}.linux-amd64.tar.gz"
dest: /usr/local
- name: link go toolchain
file:
src: "/usr/local/go/bin/{{ item }}"
dest: "/usr/bin/{{ item }}"
state: link
with_items:
- go
- gofmt
- godoc
- name: ensure user profile exists
file:
path: "{{ ansible_user_dir }}/.profile"
state: touch
- name: set up PATH for Go toolchain and built binaries
lineinfile:
dest: "{{ ansible_user_dir }}/.profile"
line: 'PATH={{ ansible_env.PATH }}:{{ ansible_env.GOPATH }}/bin:/usr/local/go/bin'
regexp: '^PATH='
state: present
- name: set up directories
file:
path: "{{ item }}"
state: directory
with_items:
- "{{ ansible_env.GOPATH }}/src/github.com/containernetworking"
- "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator"
- "{{ ansible_env.GOPATH }}/src/github.com/k8s.io"
- "{{ ansible_env.GOPATH }}/src/github.com/sstephenson"
- "{{ ansible_env.GOPATH }}/src/github.com/opencontainers"
- name: install Go tools and dependencies
shell: /usr/bin/go get -u "github.com/{{ item }}"
with_items:
- tools/godep
- onsi/ginkgo/ginkgo
- onsi/gomega
- cloudflare/cfssl/cmd/...
- jteeuwen/go-bindata/go-bindata
- cpuguy83/go-md2man

View file

@ -1,125 +0,0 @@
- hosts: all
remote_user: root
vars_files:
- "{{ playbook_dir }}/vars.yml"
tags:
- setup
tasks:
- name: set up the system
include: system.yml
- name: install Golang tools
include: golang.yml
vars:
version: "1.8.5"
- name: clone build and install bats
include: "build/bats.yml"
- name: clone build and install cri-tools
include: "build/cri-tools.yml"
vars:
cri_tools_git_version: "b42fc3f364dd48f649d55926c34492beeb9b2e99"
- name: clone build and install kubernetes
include: "build/kubernetes.yml"
vars:
k8s_git_version: "cri-o-node-e2e-patched-logs"
k8s_github_fork: "runcom"
crio_socket: "/var/run/crio.sock"
- name: clone build and install runc
include: "build/runc.yml"
- name: clone build and install networking plugins
include: "build/plugins.yml"
- hosts: all
remote_user: root
vars_files:
- "{{ playbook_dir }}/vars.yml"
tags:
- integration
- e2e
- node-e2e
- critest
tasks:
- name: clone build and install cri-o
include: "build/cri-o.yml"
- hosts: all
remote_user: root
vars_files:
- "{{ playbook_dir }}/vars.yml"
tags:
- integration
tasks:
- name: clone build and install cri-tools
include: "build/cri-tools.yml"
vars:
force_clone: True
cri_tools_git_version: "a9e38a4a000bc1a4052fb33de1c967b8cfe9ad40"
- name: run cri-o integration tests
include: test.yml
- hosts: all
remote_user: root
vars_files:
- "{{ playbook_dir }}/vars.yml"
tags:
- critest
tasks:
- name: install Golang tools
include: golang.yml
vars:
version: "1.9.2"
- name: setup critest
include: "build/cri-tools.yml"
vars:
force_clone: True
cri_tools_git_version: "a9e38a4a000bc1a4052fb33de1c967b8cfe9ad40"
- name: run critest validation and benchmarks
include: critest.yml
- hosts: all
remote_user: root
vars_files:
- "{{ playbook_dir }}/vars.yml"
tags:
- node-e2e
tasks:
- name: install Golang tools
include: golang.yml
vars:
version: "1.9.2"
- name: clone build and install kubernetes
include: "build/kubernetes.yml"
vars:
force_clone: True
k8s_git_version: "master"
k8s_github_fork: "kubernetes"
crio_socket: "/var/run/crio/crio.sock"
- name: run k8s node-e2e tests
include: node-e2e.yml
- hosts: all
remote_user: root
vars_files:
- "{{ playbook_dir }}/vars.yml"
tags:
- e2e
tasks:
- name: install Golang tools
include: golang.yml
vars:
version: "1.9.2"
- name: clone build and install kubernetes
include: "build/kubernetes.yml"
vars:
force_clone: True
# master as of 12/11/2017
k8s_git_version: "master-nfs-fix"
k8s_github_fork: "runcom"
crio_socket: "/var/run/crio/crio.sock"
- name: run k8s e2e tests
include: e2e.yml

View file

@ -1,26 +0,0 @@
---
- name: enable and start CRI-O
systemd:
name: crio
state: started
enabled: yes
daemon_reload: yes
- name: disable SELinux
command: setenforce 0
- name: Flush the iptables
command: iptables -F
- name: run node-e2e tests
shell: |
# parametrize crio socket
# cgroup-driver???
# TODO(runcom): remove conformance focus, we want everything for testgrid
make test-e2e-node PARALLELISM=1 RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT=/var/run/crio.sock IMAGE_SERVICE_ENDPOINT=/var/run/crio/crio.sock TEST_ARGS='--prepull-images=true --kubelet-flags="--cgroup-driver=systemd"' FOCUS="\[Conformance\]" &> {{ artifacts }}/node-e2e.log
args:
chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
async: 7200
poll: 10
ignore_errors: true

View file

@ -1,62 +0,0 @@
---
# vim-syntax: ansible
- hosts: '{{ hosts | default("all") }}'
vars_files:
- "{{ playbook_dir }}/vars.yml"
vars:
_result_filepaths: [] # do not use
_dstfnbuff: [] # do not use
tasks:
- name: The crio_integration_filepath is required
tags:
- integration
set_fact:
_result_filepaths: "{{ _result_filepaths + [crio_integration_filepath] }}"
- name: The crio_node_e2e_filepath is required
tags:
- e2e
set_fact:
_result_filepaths: "{{ _result_filepaths + [crio_node_e2e_filepath] }}"
- name: Verify expectations
assert:
that:
- 'result_dest_basedir | default(False, True)'
- '_result_filepaths | default(False, True)'
- '_dstfnbuff == []'
- 'results_fetched is undefined'
- name: Results directory exists
file:
path: "{{ result_dest_basedir }}"
state: directory
delegate_to: localhost
- name: destination file paths are buffered for overwrite-checking and jUnit conversion
set_fact:
_dstfnbuff: >
{{ _dstfnbuff |
union( [result_dest_basedir ~ "/" ~ inventory_hostname ~ "/" ~ item | basename] ) }}
with_items: '{{ _result_filepaths }}'
- name: Overwriting existing results assumed very very bad
fail:
msg: "Cowardly refusing to overwrite {{ item }}"
when: item | exists
delegate_to: localhost
with_items: '{{ _dstfnbuff }}'
# fetch module doesn't support directories
- name: Retrieve results from all hosts
synchronize:
checksum: True # Don't rely on date/time being in sync
archive: False # Don't bother with permissions or times
copy_links: True # We want files, not links to files
recursive: True
mode: pull
dest: '{{ result_dest_basedir }}/{{ inventory_hostname }}/' # must end in /
src: '{{ item }}'
register: results_fetched
with_items: '{{ _result_filepaths }}'

View file

@ -1,134 +0,0 @@
---
- name: Make sure we have all required packages
package:
name: "{{ item }}"
state: present
with_items:
- atomic-registries
- container-selinux
- curl
- device-mapper-devel
- expect
- findutils
- gcc
- git
- glib2-devel
- glibc-devel
- glibc-static
- gpgme-devel
- hostname
- iproute
- iptables
- krb5-workstation
- libassuan-devel
- libffi-devel
- libgpg-error-devel
- libguestfs-tools
- libseccomp-devel
- libvirt-client
- libvirt-python
- libxml2-devel
- libxslt-devel
- make
- mlocate
- nfs-utils
- nmap-ncat
- oci-register-machine
- oci-systemd-hook
- oci-umount
- openssl
- openssl-devel
- ostree-devel
- pkgconfig
- python
- python2-crypto
- python-devel
- python-rhsm-certificates
- python-virtualenv
- PyYAML
- redhat-rpm-config
- rpcbind
- rsync
- sed
- skopeo-containers
- socat
- tar
- wget
async: 600
poll: 10
- name: Add python2-boto for Fedora
package:
name: "{{ item }}"
state: present
with_items:
- python2-boto
when: ansible_distribution in ['Fedora']
- name: Add python-boto for RHEL and CentOS
package:
name: "{{ item }}"
state: present
with_items:
- python-boto
when: ansible_distribution in ['RedHat', 'CentOS']
- name: Add Btrfs for Fedora
package:
name: "{{ item }}"
state: present
with_items:
- btrfs-progs-devel
when: ansible_distribution in ['Fedora']
- name: Update all packages
package:
name: '*'
state: latest
async: 600
poll: 10
- name: Setup swap to prevent kernel firing off the OOM killer
shell: |
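# create an 8G sparse file, attach it to a loop device, and enable it as swap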
truncate -s 8G /root/swap && \
export SWAPDEV=$(losetup --show -f /root/swap | head -1) && \
mkswap $SWAPDEV && \
swapon $SWAPDEV && \
swapon --show
- name: ensure directories exist as needed
file:
path: "{{ item }}"
state: directory
with_items:
- /opt/cni/bin
- /etc/cni/net.d
- name: set sysctl vm.overcommit_memory=1 for CentOS
sysctl:
name: vm.overcommit_memory
state: present
value: 1
when: ansible_distribution == 'CentOS'
- name: inject hostname into /etc/hosts
lineinfile:
dest: /etc/hosts
line: '{{ ansible_default_ipv4.address }} {{ ansible_nodename }}'
insertafter: 'EOF'
regexp: '{{ ansible_default_ipv4.address }}\s+{{ ansible_nodename }}'
state: present
- name: Flush the iptables
command: iptables -F
- name: Enable localnet routing
command: sysctl -w net.ipv4.conf.all.route_localnet=1
- name: Add masquerade for localhost
command: iptables -t nat -I POSTROUTING -s 127.0.0.1 ! -d 127.0.0.1 -j MASQUERADE
- name: Update the kernel cmdline to include quota support
command: grubby --update-kernel=ALL --args="rootflags=pquota"
when: ansible_distribution in ['RedHat', 'CentOS']

View file

@ -1,25 +0,0 @@
---
- name: Make testing output verbose so it can be converted to xunit
lineinfile:
dest: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/hack/make-rules/test.sh"
line: ' go test -v "${goflags[@]:+${goflags[@]}}" \'
regexp: ' go test \"\$'
state: present
- name: set extra storage options
set_fact:
extra_storage_opts: " --storage-opt overlay.override_kernel_check=1"
when: ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS'
- name: ensure directory exists for e2e reports
file:
path: "{{ artifacts }}"
state: directory
- name: run integration tests
shell: "CGROUP_MANAGER=cgroupfs STORAGE_OPTIONS='--storage-driver=overlay{{ extra_storage_opts | default('') }}' make localintegration >& {{ artifacts }}/testout.txt"
args:
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
async: 5400
poll: 30

View file

@ -1,8 +0,0 @@
---
# For results.yml Paths use rsync 'source' conventions
artifacts: "/tmp/artifacts" # Base-directory for collection
crio_integration_filepath: "{{ artifacts }}/testout.txt"
crio_node_e2e_filepath: "{{ artifacts }}/junit_01.xml"
result_dest_basedir: '{{ lookup("env","WORKSPACE") |
default(playbook_dir, True) }}/artifacts'

View file

@ -1,54 +0,0 @@
# Pip requirements file for Ansible-based integration-testing environment.
# Intended to be utilized by venv-ansible-playbook.sh script
#
# N/B: Hashes are required here | versions frozen for stability
ansible==2.3.1.0 --hash=sha256:cd4b8f53720fcd0c351156b840fdd15ecfbec22c951b5406ec503de49d40b9f5
asn1crypto==0.22.0 --hash=sha256:d232509fefcfcdb9a331f37e9c9dc20441019ad927c7d2176cf18ed5da0ba097 \
--hash=sha256:cbbadd640d3165ab24b06ef25d1dca09a3441611ac15f6a6b452474fdf0aed1a
bcrypt==3.1.3 --hash=sha256:05b35b9842b009b44496fa5433ce462f69966291e50fbd471dbb427f399f748f \
--hash=sha256:6645c8d0ad845308de3eb9be98b6fd22a46ec5412bfc664a423e411cdd8f5488
cffi==1.10.0 --hash=sha256:c49187260043bd4c1d6a52186f9774f17d9b1da0a406798ebf4bfc12da166ade \
--hash=sha256:b3b02911eb1f6ada203b0763ba924234629b51586f72a21faacc638269f4ced5
cryptography==1.9 --hash=sha256:5518337022718029e367d982642f3e3523541e098ad671672a90b82474c84882
enum34==1.1.6 --hash=sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79 \
--hash=sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1
idna==2.5 --hash=sha256:cc19709fd6d0cbfed39ea875d29ba6d4e22c0cebc510a76d6302a28385e8bb70 \
--hash=sha256:3cb5ce08046c4e3a560fc02f138d0ac63e00f8ce5901a56b32ec8b7994082aab
ipaddress==1.0.18 --hash=sha256:d34cf15d95ce9a734560f7400a8bd2ac2606f378e2a1d0eadbf1c98707e7c74a \
--hash=sha256:5d8534c8e185f2d8a1fda1ef73f2c8f4b23264e8e30063feeb9511d492a413e1
Jinja2==2.9.6 --hash=sha256:2231bace0dfd8d2bf1e5d7e41239c06c9e0ded46e70cc1094a0aa64b0afeb054 \
--hash=sha256:ddaa01a212cd6d641401cb01b605f4a4d9f37bfc93043d7f760ec70fb99ff9ff
MarkupSafe==1.0 --hash=sha256:a6be69091dac236ea9c6bc7d012beab42010fa914c459791d627dad4910eb665
paramiko==2.2.1 --hash=sha256:9c9402377ba8594889aab1e44a13b78eda685eb2145dc00b2353b4fbb25088cf \
--hash=sha256:ff94ae65379914ec3c960de731381f49092057b6dd1d24d18842ead5a2eb2277
pyasn1==0.2.3 --hash=sha256:0439b9bd518418260c2641a571f0e07fce4370cab13b68f19b5e023306c03cad \
--hash=sha256:738c4ebd88a718e700ee35c8d129acce2286542daa80a82823a7073644f706ad
pycparser==2.17 --hash=sha256:0aac31e917c24cb3357f5a4d5566f2cc91a19ca41862f6c3c22dc60a629673b6
pycrypto==2.6.1 --hash=sha256:f2ce1e989b272cfcb677616763e0a2e7ec659effa67a88aa92b3a65528f60a3c
PyNaCl==1.1.2 --hash=sha256:57314a7bad4bd39501dc622942f9921923673e52e126b0fc4f0214b5d25d619a \
--hash=sha256:32f52b754abf07c319c04ce16905109cab44b0e7f7c79497431d3b2000f8af8c
PyYAML==3.12 --hash=sha256:592766c6303207a20efc445587778322d7f73b161bd994f227adaa341ba212ab
six==1.10.0 --hash=sha256:0ff78c403d9bccf5a425a6d31a12aa6b47f1c21ca4dc2573a7e2f32a97335eb1 \
--hash=sha256:105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a
virtualenv==15.1.0 --hash=sha256:39d88b533b422825d644087a21e78c45cf5af0ef7a99a1fc9fbb7b481e5c85b0 \
--hash=sha256:02f8102c2436bb03b3ee6dede1919d1dac8a427541652e5ec95171ec8adbc93a
pip==9.0.1 --hash=sha256:690b762c0a8460c303c089d5d0be034fb15a5ea2b75bdf565f40421f542fefb0

View file

@ -1,106 +0,0 @@
#!/bin/bash
# example usage
# $ ./venv-ansible-playbook.sh \
# -i 192.168.169.170 \
# --private-key=/path/to/key \
# --extra-vars "pullrequest=42" \
# --extra-vars "commit=abcd1234" \
# --user root \
# --verbose \
# $PWD/crio-integration-playbook.yaml
# All errors are fatal
set -e
SCRIPT_PATH=`realpath $(dirname $0)`
REQUIREMENTS="$SCRIPT_PATH/requirements.txt"
echo
if ! type -P virtualenv &> /dev/null
then
echo "Could not find required 'virtualenv' binary installed on system."
exit 1
fi
if [ "$#" -lt "1" ]
then
echo "No ansible-playbook command-line options specified."
echo "usage: $0 -i whatever --private-key=something --extra-vars foo=bar playbook.yml"
exit 2
fi
# Avoid dirtying up repository, keep execution bits confined to a known location
if [ -z "$WORKSPACE" ] || [ ! -d "$WORKSPACE" ]
then
export WORKSPACE="$(mktemp -d)"
echo "Using temporary \$WORKSPACE=\"$WORKSPACE\" for execution environment."
echo "Directory will be removed upon exit. Export this variable with path"
echo "to an existing directory to preserve contents."
trap 'rm -rf "$WORKSPACE"' EXIT
else
echo "Using existing \$WORKSPACE=\"$WORKSPACE\" for execution environment."
echo "Directory will be left as-is upon exit."
# Don't recycle cache, next job may have different requirements
trap 'rm -rf "$PIPCACHE"' EXIT
fi
# Create a directory to contain logs and test artifacts
export ARTIFACTS=$(mkdir -pv $WORKSPACE/artifacts | tail -1 | cut -d \' -f 2)
[ -d "$ARTIFACTS" ] || exit 3
# All command failures from now on are fatal
set -e
echo
echo "Bootstrapping trusted virtual environment, this may take a few minutes, depending on networking."
echo "(logs: \"$ARTIFACTS/crio_venv_setup_log.txt\")"
echo
(
set -x
cd "$WORKSPACE"
# When running more than once, make it fast by skipping the bootstrap
if [ ! -d "./.cri-o_venv" ]; then
# N/B: local system's virtualenv binary - uncontrolled version fixed below
virtualenv --no-site-packages --python=python2.7 ./.venvbootstrap
# Set up paths to install/operate out of $WORKSPACE/.venvbootstrap
source ./.venvbootstrap/bin/activate
# N/B: local system's pip binary - uncontrolled version fixed below
# pip may not support --cache-dir; force its location into $WORKSPACE the ugly way
OLD_HOME="$HOME"
export HOME="$WORKSPACE"
export PIPCACHE="$WORKSPACE/.cache/pip"
pip install --force-reinstall --upgrade pip==9.0.1
# Undo --cache-dir workaround
export HOME="$OLD_HOME"
# Install fixed, trusted, hashed versions of all requirements (including pip and virtualenv)
pip --cache-dir="$PIPCACHE" install --require-hashes \
--requirement "$SCRIPT_PATH/requirements.txt"
# Setup trusted virtualenv using hashed binary from requirements.txt
./.venvbootstrap/bin/virtualenv --no-site-packages --python=python2.7 ./.cri-o_venv
# Exit untrusted virtualenv
deactivate
fi
# Enter trusted virtualenv
source ./.cri-o_venv/bin/activate
# Upgrade stock-pip to support hashes
pip install --force-reinstall --cache-dir="$PIPCACHE" --upgrade pip==9.0.1
# Re-install from cache but validate all hashes (including on pip itself)
pip --cache-dir="$PIPCACHE" install --require-hashes \
--requirement "$SCRIPT_PATH/requirements.txt"
# Remove temporary bootstrap virtualenv
rm -rf ./.venvbootstrap
# Exit trusted virtualenv
) &> $ARTIFACTS/crio_venv_setup_log.txt;
echo
echo "Executing \"$WORKSPACE/.cri-o_venv/bin/ansible-playbook $@\""
echo
# Execute command-line arguments under virtualenv
source ${WORKSPACE}/.cri-o_venv/bin/activate
${WORKSPACE}/.cri-o_venv/bin/ansible-playbook $@

View file

@ -1 +0,0 @@
runtime-endpoint: /var/run/crio/crio.sock

View file

@ -1,8 +0,0 @@
# This contains a list of paths on the host which will be unmounted inside
# the container (if they are mounted inside the container).
# If a path ends in "/*", only the mounts underneath that mount (submounts)
# will be unmounted; the top-level mount itself will remain in place.
/var/run/containers/*
/var/lib/containers/storage/*

View file

@ -1,142 +0,0 @@
% crio(8) Open Container Initiative Daemon
% Dan Walsh
% SEPTEMBER 2016
# NAME
crio - OCI Kubernetes Container Runtime daemon
# SYNOPSIS
crio
```
[--apparmor-profile=[value]]
[--cgroup-manager=[value]]
[--cni-config-dir=[value]]
[--cni-plugin-dir=[value]]
[--config=[value]]
[--conmon=[value]]
[--cpu-profile=[value]]
[--default-transport=[value]]
[--help|-h]
[--insecure-registry=[value]]
[--listen=[value]]
[--log=[value]]
[--log-format value]
[--log-level value]
[--pause-command=[value]]
[--pause-image=[value]]
[--registry=[value]]
[--root=[value]]
[--runroot=[value]]
[--runtime=[value]]
[--seccomp-profile=[value]]
[--selinux]
[--signature-policy=[value]]
[--storage-driver=[value]]
[--storage-opt=[value]]
[--version|-v]
```
# DESCRIPTION
OCI-based implementation of Kubernetes Container Runtime Interface Daemon
crio is meant to provide an integration path between OCI conformant runtimes and the kubelet. Specifically, it implements the Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes. The scope of crio is tied to the scope of the CRI.
1. Support multiple image formats including the existing Docker image format.
2. Support for multiple means to download images including trust & image verification.
3. Container image management (managing image layers, overlay filesystems, etc).
4. Container process lifecycle management.
5. Monitoring and logging required to satisfy the CRI.
6. Resource isolation as required by the CRI.
**Usage**:
```
crio [GLOBAL OPTIONS]
crio [GLOBAL OPTIONS] config [OPTIONS]
```
# GLOBAL OPTIONS
**--apparmor-profile**="": Name of the apparmor profile to be used as the runtime's default (default: "crio-default")
**--cgroup-manager**="": cgroup manager (cgroupfs or systemd)
**--config**="": path to configuration file
**--conmon**="": path to the conmon executable (default: "/usr/local/libexec/crio/conmon")
**--cpu-profile**="": set the CPU profile file path
**--default-transport**: A prefix to prepend to image names that can't be pulled as-is.
**--help, -h**: Print usage statement
**--insecure-registry=**: Enable insecure registry communication, i.e., enable un-encrypted and/or untrusted communication.
1. List of insecure registries can contain an element with CIDR notation to specify a whole subnet.
2. Insecure registries accept HTTP or accept HTTPS with certificates from unknown CAs.
3. Enabling `--insecure-registry` is useful when running a local registry. However, because its use creates security vulnerabilities, **it should ONLY be enabled for testing purposes**. For increased security, users should add their CA to their system's list of trusted CAs instead of using `--insecure-registry`.
**--image-volumes**="": Image volume handling ('mkdir', 'bind' or 'ignore') (default: "mkdir")
1. mkdir: A directory is created inside the container root filesystem for the volumes.
2. bind: A directory is created inside container state directory and bind mounted into the container for the volumes.
3. ignore: All volumes are just ignored and no action is taken.
**--listen**="": Path to CRI-O socket (default: "/var/run/crio/crio.sock")
**--log**="": Set the log file path where internal debug information is written
**--log-format**="": Set the format used by logs ('text' (default), or 'json') (default: "text")
**--log-level**="": log crio messages above specified level: debug, info (default), warn, error, fatal or panic
**--log-size-max**="": Maximum log size in bytes for a container (default: -1 (no limit)). If it is positive, it must be >= 8192 (to match/exceed conmon read buffer).
**--pause-command**="": Path to the pause executable in the pause image (default: "/pause")
**--pause-image**="": Image which contains the pause executable (default: "kubernetes/pause")
**--pids-limit**="": Maximum number of processes allowed in a container (default: 1024)
**--enable-shared-pid-namespace**="": Enable using a shared PID namespace for containers in a pod (default: false)
**--root**="": The crio root dir (default: "/var/lib/containers/storage")
**--registry**="": Registry host which will be prepended to unqualified images, can be specified multiple times
**--runroot**="": The crio state dir (default: "/var/run/containers/storage")
**--runtime**="": OCI runtime path (default: "/usr/bin/runc")
**--selinux**=**true**|**false**: Enable selinux support (default: false)
**--seccomp-profile**="": Path to the seccomp json profile to be used as the runtime's default (default: "/etc/crio/seccomp.json")
**--signature-policy**="": Path to the signature policy json file (default: "", to use the system-wide default)
**--storage-driver**: OCI storage driver (default: "devicemapper")
**--storage-opt**: OCI storage driver option (no default)
**--cni-config-dir**="": CNI configuration files directory (default: "/etc/cni/net.d/")
**--cni-plugin-dir**="": CNI plugin binaries directory (default: "/opt/cni/bin/")
**--cpu-profile**: Set the CPU profile file path
**--version, -v**: Print the version
# COMMANDS
CRI-O's default command is to start the daemon. However, it currently offers a
single additional subcommand.
## config
Outputs a commented version of the configuration file that would've been used
by CRI-O. This allows you to save your current configuration setup and then load
it later with **--config**. Global options will modify the output.
**--default**
Output the default configuration (without taking into account any configuration options).
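A sketch of how this subcommand is typically used; the output path below is only an example, not a default documented in this page:
```
# Save the configuration CRI-O would currently use, then point the daemon at the saved file.
crio config > /etc/crio/crio.conf
crio --config /etc/crio/crio.conf
```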
# SEE ALSO
crio.conf(5)
# HISTORY
Sept 2016, Originally compiled by Dan Walsh <dwalsh@redhat.com> and Aleksa Sarai <asarai@suse.de>

View file

@ -1,162 +0,0 @@
% crio.conf(5) Open Container Initiative Daemon
% Aleksa Sarai
% OCTOBER 2016
# NAME
crio.conf - CRI-O configuration file
# DESCRIPTION
The CRI-O configuration file specifies all of the available command-line options
for the crio(8) program, but in a TOML format that can be more easily modified
and versioned.
# FORMAT
The [TOML format][toml] is used as the encoding of the configuration file.
Every option and subtable listed here is nested under a global "crio" table.
No bare options are used. The format of TOML can be simplified to:
[table]
option = value
[table.subtable1]
option = value
[table.subtable2]
option = value
## CRIO TABLE
The `crio` table supports the following options:
**root**=""
CRIO root dir (default: "/var/lib/containers/storage")
**runroot**=""
CRIO state dir (default: "/var/run/containers/storage")
**storage_driver**=""
CRIO storage driver (default is "overlay")
Note:
**overlay** and **overlay2** are the same driver
**storage_option**=[]
CRIO storage driver option list (no default)
Values:
"STORAGE_DRIVER.imagestore=/PATH",
Paths to additional container image stores. These are read-only and are usually located on remote network shares, using the overlay storage format.
storage_option=[ "overlay.imagestore=/mnt/overlay", ]
"STORAGE_DRIVER.size=SIZE"
Maximum size of a container image. Default is 10GB. The size flag sets quota on the size of container images.
storage_option=[ "overlay.size=1G", ]
Note: Not all drivers support all options.
Note: In order to use the **size** option for quota on *overlay* storage you must use the *xfs* file system. The mount point that the *overlay* file system sits on must be mounted with the *pquota* flag at mount time. If you are setting up / to be used with quota, you have to modify the Linux boot line in your GRUB configuration (e.g. /etc/grub2.cfg) and add the rootflags=pquota flag.
Example:
linux16 /vmlinuz-4.12.13-300.fc26.x86_64 root=/dev/mapper/fedora-root ro rd.lvm.lv=fedora/root rd.lvm.lv=fedora/swap rhgb quiet LANG=en_US.UTF-8 rootflags=pquota
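A hedged sketch of the setup described in this note; the device and mount point are placeholders, not values taken from this page:
```
# Back the storage root with xfs mounted with project quota support, then set
# storage_option=[ "overlay.size=1G", ] in crio.conf as shown above.
mkfs.xfs /dev/vdb
mount -o pquota /dev/vdb /var/lib/containers/storage
```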
## CRIO.API TABLE
**listen**=""
Path to crio socket (default: "/var/run/crio/crio.sock")
## CRIO.RUNTIME TABLE
**conmon**=""
Path to the conmon executable (default: "/usr/local/libexec/crio/conmon")
**conmon_env**=[]
Environment variable list for conmon process (default: ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",])
**log_size_max**=""
Maximum size allowed for the container log file (default: -1)
Negative numbers indicate that no size limit is imposed.
If it is positive, it must be >= 8192 (to match/exceed conmon read buffer).
The file is truncated and re-opened so the limit is never exceeded.
**pids_limit**=""
Maximum number of processes allowed in a container (default: 1024)
**enable_shared_pid_namespace**=""
Enable using a shared PID namespace for containers in a pod (default: false)
**runtime**=""
OCI runtime path (default: "/usr/bin/runc")
**selinux**=*true*|*false*
Enable selinux support (default: false)
**signature_policy**=""
Path to the signature policy json file (default: "", to use the system-wide default)
**seccomp_profile**=""
Path to the seccomp json profile to be used as the runtime's default (default: "/etc/crio/seccomp.json")
**apparmor_profile**=""
Name of the apparmor profile to be used as the runtime's default (default: "crio-default")
**no_pivot**=*true*|*false*
Instructs the runtime to not use pivot_root, but instead use MS_MOVE
**default_mounts**=[]
List of mount points, in the form host:container, to be mounted in every container
## CRIO.IMAGE TABLE
**default_transport**
A prefix to prepend to image names that can't be pulled as-is (default: "docker://")
**image_volumes**=""
Image volume handling ('mkdir', 'bind' or 'ignore') (default: "mkdir")
mkdir: A directory is created inside the container root filesystem for the volumes.
bind: A directory is created inside container state directory and bind mounted into
the container for the volumes.
ignore: All volumes are just ignored and no action is taken.
**insecure_registries**=""
Enable insecure registry communication, i.e., enable un-encrypted
and/or untrusted communication.
List of insecure registries can contain an element with CIDR notation
to specify a whole subnet. Insecure registries accept HTTP and/or
accept HTTPS with certificates from unknown CAs.
Enabling --insecure-registry is useful when running a local registry.
However, because its use creates security vulnerabilities it should
ONLY be enabled for testing purposes. For increased security, users
should add their CA to their system's list of trusted CAs instead of
using --insecure-registry.
**pause_command**=""
Path to the pause executable in the pause image (default: "/pause")
**pause_image**=""
Image which contains the pause executable (default: "kubernetes/pause")
**registries**=""
Comma separated list of registries that will be prepended when pulling
unqualified images
## CRIO.NETWORK TABLE
**network_dir**=""
Path to CNI configuration files (default: "/etc/cni/net.d/")
**plugin_dir**=""
Path to CNI plugin binaries (default: "/opt/cni/bin/")
# SEE ALSO
crio(8)
# HISTORY
Oct 2016, Originally compiled by Aleksa Sarai <asarai@suse.de>

42
docs/kpod-launch.1.md Normal file
View file

@ -0,0 +1,42 @@
% kpod(8) # kpod-launch - Simple management tool for pods and images
% Dan Walsh
% SEPTEMBER 2016
# NAME
kpod-launch - Launch a new pod
# SYNOPSIS
**kpod launch**
[**--help**|**-h**]
# DESCRIPTION
Launch a container process in a new pod. **kpod launch** starts a process with
its own file system, its own networking, and its own isolated process tree.
The IMAGE which starts the process may define defaults related to the process
that will be launched in the pod, the networking to expose, and more, but
**kpod launch** gives final control to the operator or administrator who
starts the pod from the image. For that reason **kpod launch** has more
options than any other kpod command.
If the IMAGE is not already loaded then **kpod launch** will pull the IMAGE, and
all image dependencies, from the repository in the same way as running **kpod
pull** IMAGE, before it starts the container from that image.
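A minimal sketch of the flow described above; the image name is a placeholder and the exact argument form is an assumption, since this page only documents **--help**:
```
# Pulls the image (and any image dependencies) if not already loaded, then launches the pod.
kpod launch fedora
```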
**kpod [GLOBAL OPTIONS]**
**kpod [GLOBAL OPTIONS] launch [OPTIONS]**
# GLOBAL OPTIONS
**--help, -h**
Print usage statement
# COMMANDS
## launch
Launch a pod
# SEE ALSO
kpod(1), ocid(8), ocid.conf(5)
# HISTORY
Dec 2016, Originally compiled by Dan Walsh <dwalsh@redhat.com>

39
docs/kpod.1.md Normal file
View file

@ -0,0 +1,39 @@
% kpod(8) # kpod - Simple management tool for pods and images
% Dan Walsh
% SEPTEMBER 2016
# NAME
kpod
# SYNOPSIS
**kpod**
[**--help**|**-h**]
# DESCRIPTION
kpod is a simple client-only tool to help with debugging issues when daemons
such as the CRI runtime and the kubelet are not responding or failing. A shared API
layer could be created to share code between the daemon and kpod. kpod does not
require any daemon to be running. kpod utilizes the same underlying components that
ocid uses, i.e. containers/image, containers/storage, oci-runtime-tool/generate, and
runc or any other OCI-compatible runtime. kpod shares state with ocid and so
has the capability to debug pods/images created by ocid.
**kpod [GLOBAL OPTIONS]**
# GLOBAL OPTIONS
**--help, -h**
Print usage statement
**--version, -v**
Print the version
# COMMANDS
## launch
Launch a pod
# SEE ALSO
ocid(8), ocid.conf(5)
# HISTORY
Dec 2016, Originally compiled by Dan Walsh <dwalsh@redhat.com>

136
docs/ocid.8.md Normal file
View file

@ -0,0 +1,136 @@
% ocid(8) Open Container Initiative Daemon
% Dan Walsh
% SEPTEMBER 2016
# NAME
ocid - Enable OCI Kubernetes Container Runtime daemon
# SYNOPSIS
**ocid**
[**--config**=[*value*]]
[**--conmon**=[*value*]]
[**--debug**]
[**--default-transport**=[*value*]]
[**--help**|**-h**]
[**--listen**=[*value*]]
[**--log**=[*value*]]
[**--log-format value**]
[**--pause-command**=[*value*]]
[**--pause-image**=[*value*]]
[**--root**=[*value*]]
[**--runroot**=[*value*]]
[**--runtime**=[*value*]]
[**--signature-policy**=[*value*]]
[**--storage-driver**=[*value*]]
[**--storage-opt**=[*value*]]
[**--selinux**]
[**--seccomp-profile**=[*value*]]
[**--apparmor-profile**=[*value*]]
[**--cni-config-dir**=[*value*]]
[**--cni-plugin-dir**=[*value*]]
[**--version**|**-v**]
# DESCRIPTION
OCI-based implementation of Kubernetes Container Runtime Interface Daemon
ocid is meant to provide an integration path between OCI conformant runtimes and the kubelet. Specifically, it implements the Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes. The scope of ocid is tied to the scope of the CRI.
* Support multiple image formats including the existing Docker image format
* Support for multiple means to download images including trust & image verification
* Container image management (managing image layers, overlay filesystems, etc)
* Container process lifecycle management
* Monitoring and logging required to satisfy the CRI
* Resource isolation as required by the CRI
**ocid [GLOBAL OPTIONS]**
**ocid [GLOBAL OPTIONS] config [OPTIONS]**
# GLOBAL OPTIONS
**--apparmor-profile**=""
Name of the apparmor profile to be used as the runtime's default (default: "ocid-default")
**--config**=""
path to configuration file
**--conmon**=""
path to the conmon executable (default: "/usr/local/libexec/ocid/conmon")
**--debug**
Enable debug output for logging
**--default-transport**
A prefix to prepend to image names that can't be pulled as-is.
**--help, -h**
Print usage statement
**--listen**=""
Path to ocid socket (default: "/var/run/ocid.sock")
**--log**=""
Set the log file path where internal debug information is written
**--log-format**=""
Set the format used by logs ('text' (default), or 'json') (default: "text")
**--pause-command**=""
Path to the pause executable in the pause image (default: "/pause")
**--pause-image**=""
Image which contains the pause executable (default: "kubernetes/pause")
**--root**=""
OCID root dir (default: "/var/lib/containers/storage")
**--runroot**=""
OCID state dir (default: "/var/run/containers/storage")
**--runtime**=""
OCI runtime path (default: "/usr/bin/runc")
**--selinux**=*true*|*false*
Enable selinux support (default: false)
**--seccomp-profile**=""
Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json")
**--signature-policy**=""
Path to the signature policy json file (default: "", to use the system-wide default)
**--storage-driver**
OCI storage driver (default: "devicemapper")
**--storage-opt**
OCI storage driver option (no default)
**--cni-config-dir**=""
CNI configuration files directory (default: "/etc/cni/net.d/")
**--cni-plugin-dir**=""
CNI plugin binaries directory (default: "/opt/cni/bin/")
**--cpu-profile**
Set the CPU profile file path
**--version, -v**
Print the version
# COMMANDS
OCID's default command is to start the daemon. However, it currently offers a
single additional subcommand.
## config
Outputs a commented version of the configuration file that would've been used
by OCID. This allows you to save your current configuration setup and then load
it later with **--config**. Global options will modify the output.
**--default**
Output the default configuration (without taking into account any configuration options).
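A hedged sketch of using this subcommand; the output path is illustrative only:
```
# Write out the default configuration, edit it, then start ocid from the saved file.
ocid config --default > /etc/ocid/ocid.conf
ocid --config /etc/ocid/ocid.conf
```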
# SEE ALSO
ocid.conf(5)
# HISTORY
Sept 2016, Originally compiled by Dan Walsh <dwalsh@redhat.com> and Aleksa Sarai <asarai@suse.de>

95
docs/ocid.conf.5.md Normal file
View file

@ -0,0 +1,95 @@
% ocid.conf(5) Open Container Initiative Daemon
% Aleksa Sarai
% OCTOBER 2016
# NAME
ocid.conf - Syntax of OCID configuration file
# DESCRIPTION
The OCID configuration file specifies all of the available command-line options
for the ocid(8) program, but in a TOML format that can be more easily modified
and versioned.
# FORMAT
The [TOML format][toml] is used as the encoding of the configuration file.
Every option and subtable listed here is nested under a global "ocid" table.
No bare options are used. The format of TOML can be simplified to:
[table]
option = value
[table.subtable1]
option = value
[table.subtable2]
option = value
## OCID TABLE
The `ocid` table supports the following options:
**root**=""
OCID root dir (default: "/var/lib/containers/storage")
**runroot**=""
OCID state dir (default: "/var/run/containers/storage")
**storage_driver**=""
OCID storage driver (default is "devicemapper")
**storage_option**=[]
OCID storage driver option list (no default)
## OCID.API TABLE
**listen**=""
Path to ocid socket (default: "/var/run/ocid.sock")
## OCID.RUNTIME TABLE
**conmon**=""
Path to the conmon executable (default: "/usr/local/libexec/ocid/conmon")
**conmon_env**=[]
Environment variable list for conmon process (default: ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",])
**runtime**=""
OCI runtime path (default: "/usr/bin/runc")
**selinux**=*true*|*false*
Enable selinux support (default: false)
**signature_policy**=""
Path to the signature policy json file (default: "", to use the system-wide default)
**seccomp_profile**=""
Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json")
**apparmor_profile**=""
Name of the apparmor profile to be used as the runtime's default (default: "ocid-default")
## OCID.IMAGE TABLE
**default_transport**
A prefix to prepend to image names that can't be pulled as-is (default: "docker://")
**pause_command**=""
Path to the pause executable in the pause image (default: "/pause")
**pause_image**=""
Image which contains the pause executable (default: "kubernetes/pause")
## OCID.NETWORK TABLE
**network_dir**=""
Path to CNI configuration files (default: "/etc/cni/net.d/")
**plugin_dir**=""
Path to CNI plugin binaries (default: "/opt/cni/bin/")
# SEE ALSO
ocid(8)
# HISTORY
Oct 2016, Originally compiled by Aleksa Sarai <asarai@suse.de>

View file

@ -1,7 +0,0 @@
#!/bin/bash
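# Emits the exclude_graphdriver_btrfs build tag when the btrfs headers are not installed, so the btrfs graph driver is compiled out.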
cc -E - > /dev/null 2> /dev/null << EOF
#include <btrfs/ioctl.h>
EOF
if test $? -ne 0 ; then
echo exclude_graphdriver_btrfs
fi

View file

@ -23,7 +23,7 @@ function find-deps() {
local deps=
# gather imports from cri-o
pkgs=$(cd ${basepath}/${srcdir} && go list -f "{{.Imports}}" . | tr ' ' '\n' | tr -d '[]' | grep -v "/vendor/" | grep ${pkgname} | sed -e "s|${pkgname}/||g")
pkgs=$(cd ${basepath}/${srcdir} && go list -f "{{.Imports}}" . | tr ' ' '\n' | grep -v "/vendor/" | grep ${pkgname} | sed -e "s|${pkgname}/||g")
# add each Go import's sources to the deps list,
# and recursively get that imports's imports too

View file

@ -1,7 +0,0 @@
#!/bin/bash
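# Emits the exclude_graphdriver_devicemapper build tag when the libdevmapper headers are not installed.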
cc -E - > /dev/null 2> /dev/null << EOF
#include <libdevmapper.h>
EOF
if test $? -ne 0 ; then
echo exclude_graphdriver_devicemapper
fi

View file

@ -1,4 +0,0 @@
#!/bin/bash
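# Emits the containers_image_ostree_stub build tag when ostree-1 is not available via pkg-config.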
if ! pkg-config ostree-1 2> /dev/null ; then
echo containers_image_ostree_stub
fi

View file

@ -1,4 +0,0 @@
#!/bin/bash
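# Emits the selinux build tag when libselinux is available via pkg-config.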
if pkg-config libselinux 2> /dev/null ; then
echo selinux
fi

View file

@ -1,30 +0,0 @@
#!/usr/bin/env bash
set -e -o pipefail
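# Work out the upstream commit range so the validate_* helpers below can limit checks to changes on this branch.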
if [ -z "$VALIDATE_UPSTREAM" ]; then
# this is kind of an expensive check, so let's not do this twice if we
# are running more than one validate bundlescript
VALIDATE_REPO='https://github.com/kubernetes-incubator/cri-o.git'
VALIDATE_BRANCH='master'
VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
validate_diff() {
if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
git diff "$VALIDATE_COMMIT_DIFF" "$@"
fi
}
validate_log() {
if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
git log "$VALIDATE_COMMIT_LOG" "$@"
fi
}
fi

Some files were not shown because too many files have changed in this diff.