Merge pull request #495 from rhatdan/rename

Rename ocid to crio
Authored by Mrunal Patel on 2017-05-15 11:27:28 -07:00, committed by GitHub
commit 7ea255fcea
52 changed files with 677 additions and 677 deletions

.gitignore (4 changed lines)

@ -6,8 +6,8 @@
/docs/*.[158].gz
/kpod
/ocic
/ocid
/ocid.conf
/crio
/crio.conf
*.o
*.orig
/pause/pause


@ -29,4 +29,4 @@ script:
- make
notifications:
irc: "chat.freenode.net#ocid"
irc: "chat.freenode.net#crio"


@ -3,14 +3,14 @@ EPOCH_TEST_COMMIT ?= 78aae688e2932f0cfc2a23e28ad30b58c6b8577f
PROJECT := github.com/kubernetes-incubator/cri-o
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
OCID_IMAGE := ocid_dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
OCID_INSTANCE := ocid_dev
OCID_IMAGE := crio_dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
OCID_INSTANCE := crio_dev
PREFIX ?= ${DESTDIR}/usr/local
BINDIR ?= ${PREFIX}/bin
LIBEXECDIR ?= ${PREFIX}/libexec
MANDIR ?= ${PREFIX}/share/man
ETCDIR ?= ${DESTDIR}/etc
ETCDIR_OCID ?= ${ETCDIR}/ocid
ETCDIR_OCID ?= ${ETCDIR}/crio
BUILDTAGS := selinux seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_tag.sh)
BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
@ -25,7 +25,7 @@ GOPKGBASEDIR := $(shell dirname "$(GOPKGDIR)")
# Update VPATH so make finds .gopathok
VPATH := $(VPATH):$(GOPATH)
all: binaries ocid.conf docs
all: binaries crio.conf docs
default: help
@ -33,7 +33,7 @@ help:
@echo "Usage: make <target>"
@echo
@echo " * 'install' - Install binaries to system locations"
@echo " * 'binaries' - Build ocid, conmon and ocic"
@echo " * 'binaries' - Build crio, conmon and crioctl"
@echo " * 'integration' - Execute integration tests"
@echo " * 'clean' - Clean artifacts"
@echo " * 'lint' - Execute the source code linter"
@ -68,19 +68,19 @@ copyimg: .gopathok $(wildcard test/copyimg/*.go)
checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go)
go build -o test/checkseccomp/$@ $(PROJECT)/test/checkseccomp
ocid: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/ocid $(PROJECT))
crio: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/crio $(PROJECT))
$(GO) build -o $@ \
-tags "$(BUILDTAGS)" \
$(PROJECT)/cmd/ocid
$(PROJECT)/cmd/crio
ocic: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/ocic $(PROJECT))
$(GO) build -o $@ $(PROJECT)/cmd/ocic
crioctl: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/crioctl $(PROJECT))
$(GO) build -o $@ $(PROJECT)/cmd/crioctl
kpod: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/kpod $(PROJECT))
$(GO) build -o $@ $(PROJECT)/cmd/kpod
ocid.conf: ocid
./ocid --config="" config --default > ocid.conf
crio.conf: crio
./crio --config="" config --default > crio.conf
clean:
ifneq ($(GOPATH),)
@ -91,26 +91,26 @@ endif
rm -fr test/testdata/redis-image
find . -name \*~ -delete
find . -name \#\* -delete
rm -f ocic ocid kpod
rm -f crioctl crio kpod
make -C conmon clean
make -C pause clean
rm -f test/bin2img/bin2img
rm -f test/copyimg/copyimg
rm -f test/checkseccomp/checkseccomp
ocidimage:
crioimage:
docker build -t ${OCID_IMAGE} .
dbuild: ocidimage
dbuild: crioimage
docker run --name=${OCID_INSTANCE} --privileged ${OCID_IMAGE} -v ${PWD}:/go/src/${PROJECT} --rm make binaries
integration: ocidimage
integration: crioimage
docker run -e TESTFLAGS -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${OCID_IMAGE} make localintegration
localintegration: clean binaries
./test/test_runner.sh ${TESTFLAGS}
binaries: ocid ocic kpod conmon pause bin2img copyimg checkseccomp
binaries: crio crioctl kpod conmon pause bin2img copyimg checkseccomp
MANPAGES_MD := $(wildcard docs/*.md)
MANPAGES := $(MANPAGES_MD:%.md=%)
@ -127,11 +127,11 @@ docs/%.8: docs/%.8.md .gopathok
docs: $(MANPAGES)
install: .gopathok
install -D -m 755 ocid $(BINDIR)/ocid
install -D -m 755 ocic $(BINDIR)/ocic
install -D -m 755 crio $(BINDIR)/crio
install -D -m 755 crioctl $(BINDIR)/crioctl
install -D -m 755 kpod $(BINDIR)/kpod
install -D -m 755 conmon/conmon $(LIBEXECDIR)/ocid/conmon
install -D -m 755 pause/pause $(LIBEXECDIR)/ocid/pause
install -D -m 755 conmon/conmon $(LIBEXECDIR)/crio/conmon
install -D -m 755 pause/pause $(LIBEXECDIR)/crio/pause
install -d -m 755 $(MANDIR)/man1
install -d -m 755 $(MANDIR)/man5
install -d -m 755 $(MANDIR)/man8
@ -140,7 +140,7 @@ install: .gopathok
install -m 644 $(filter %.8,$(MANPAGES)) -t $(MANDIR)/man8
install.config:
install -D -m 644 ocid.conf $(ETCDIR_OCID)/ocid.conf
install -D -m 644 crio.conf $(ETCDIR_OCID)/crio.conf
install -D -m 644 seccomp.json $(ETCDIR_OCID)/seccomp.json
install.completions:
@ -148,14 +148,14 @@ install.completions:
install -m 644 -D completions/bash/kpod ${BASHINSTALLDIR}
install.systemd:
install -D -m 644 contrib/systemd/ocid.service $(PREFIX)/lib/systemd/system/ocid.service
install -D -m 644 contrib/systemd/ocid-shutdown.service $(PREFIX)/lib/systemd/system/ocid-shutdown.service
install -D -m 644 contrib/systemd/crio.service $(PREFIX)/lib/systemd/system/crio.service
install -D -m 644 contrib/systemd/crio-shutdown.service $(PREFIX)/lib/systemd/system/crio-shutdown.service
uninstall:
rm -f $(BINDIR)/ocid
rm -f $(BINDIR)/ocic
rm -f $(LIBEXECDIR)/ocid/conmon
rm -f $(LIBEXECDIR)/ocid/pause
rm -f $(BINDIR)/crio
rm -f $(BINDIR)/crioctl
rm -f $(LIBEXECDIR)/crio/conmon
rm -f $(LIBEXECDIR)/crio/pause
for i in $(filter %.1,$(MANPAGES)); do \
rm -f $(MANDIR)/man8/$$(basename $${i}); \
done
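Taken together, a minimal sketch of the renamed build-and-install flow implied by these targets (assuming a working Go toolchain and the build dependencies listed in the README):

```sh
# build crio, crioctl, kpod, conmon, pause and friends, plus crio.conf and the man pages
make all

# install the binaries, the default configuration (/etc/crio/crio.conf,
# /etc/crio/seccomp.json) and the renamed crio systemd units
sudo make install install.config install.systemd
```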


@ -40,7 +40,7 @@ It is currently in active development in the Kubernetes community through the [d
### Prerequisites
`runc` version 1.0.0.rc1 or greater is expected to be installed on the system. It is picked up as the default runtime by ocid.
`runc` version 1.0.0.rc1 or greater is expected to be installed on the system. It is picked up as the default runtime by crio.
### Build Dependencies
@ -170,8 +170,8 @@ your system.
You can run a local version of kubernetes with cri-o using `local-up-cluster.sh`:
1. Clone the [kubernetes repository](https://github.com/kubernetes/kubernetes)
1. Start the cri-o daemon (`ocid`)
1. From the kubernetes project directory, run: `CONTAINER_RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT='/var/run/ocid.sock --runtime-request-timeout=15m' ./hack/local-up-cluster.sh`
1. Start the cri-o daemon (`crio`)
1. From the kubernetes project directory, run: `CONTAINER_RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT='/var/run/crio.sock --runtime-request-timeout=15m' ./hack/local-up-cluster.sh`
To run a full cluster, see [the instructions](kubernetes.md).
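Put together, a hedged sketch of those two steps after the rename (assuming `crio` is built and installed and the kubernetes checkout is already in place):

```sh
# start the renamed daemon (previously `ocid`); run as root
crio &

# from the kubernetes project directory, point the kubelet at the crio socket
CONTAINER_RUNTIME=remote \
CONTAINER_RUNTIME_ENDPOINT='/var/run/crio.sock --runtime-request-timeout=15m' \
./hack/local-up-cluster.sh
```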


@ -9,8 +9,8 @@ import (
)
var commentedConfigTemplate = template.Must(template.New("config").Parse(`
# The "ocid" table contains all of the server options.
[ocid]
# The "crio" table contains all of the server options.
[crio]
# root is a path to the "root directory". OCID stores all of its data,
# including container images, in this directory.
@ -28,23 +28,23 @@ storage_driver = "{{ .Storage }}"
storage_option = [
{{ range $opt := .StorageOptions }}{{ printf "\t%q,\n" $opt }}{{ end }}]
# The "ocid.api" table contains settings for the kubelet/gRPC
# interface (which is also used by ocic).
[ocid.api]
# The "crio.api" table contains settings for the kubelet/gRPC
# interface (which is also used by crioctl).
[crio.api]
# listen is the path to the AF_LOCAL socket on which ocid will listen.
# listen is the path to the AF_LOCAL socket on which crio will listen.
listen = "{{ .Listen }}"
# The "ocid.runtime" table contains settings pertaining to the OCI
# The "crio.runtime" table contains settings pertaining to the OCI
# runtime used and options for how to set up and manage the OCI runtime.
[ocid.runtime]
[crio.runtime]
# runtime is a path to the OCI runtime which ocid will be using.
# runtime is a path to the OCI runtime which crio will be using.
runtime = "{{ .Runtime }}"
# runtime_host_privileged is a path to the OCI runtime which ocid
# runtime_host_privileged is a path to the OCI runtime which crio
# will be using for host privileged operations.
# If this string is empty, ocid will not try to use the "runtime"
# If this string is empty, crio will not try to use the "runtime"
# for all operations.
runtime_host_privileged = "{{ .RuntimeHostPrivileged }}"
@ -73,9 +73,9 @@ apparmor_profile = "{{ .ApparmorProfile }}"
# for the runtime.
cgroup_manager = "{{ .CgroupManager }}"
# The "ocid.image" table contains settings pertaining to the
# The "crio.image" table contains settings pertaining to the
# management of OCI images.
[ocid.image]
[crio.image]
# default_transport is the prefix we try prepending to an image name if the
# image name as we receive it can't be parsed as a valid source reference
@ -95,9 +95,9 @@ pause_command = "{{ .PauseCommand }}"
# unspecified so that the default system-wide policy will be used.
signature_policy = "{{ .SignaturePolicyPath }}"
# The "ocid.network" table contains settings pertaining to the
# The "crio.network" table contains settings pertaining to the
# management of CNI plugins.
[ocid.network]
[crio.network]
# network_dir is where CNI network configuration
# files are stored.
@ -112,7 +112,7 @@ plugin_dir = "{{ .PluginDir }}"
var configCommand = cli.Command{
Name: "config",
Usage: "generate ocid configuration files",
Usage: "generate crio configuration files",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "default",


@ -20,7 +20,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
const ociConfigPath = "/etc/ocid/ocid.conf"
const ociConfigPath = "/etc/crio/crio.conf"
func mergeConfig(config *server.Config, ctx *cli.Context) error {
// Don't parse the config if the user explicitly set it to "".
@ -117,8 +117,8 @@ func main() {
return
}
app := cli.NewApp()
app.Name = "ocid"
app.Usage = "ocid server"
app.Name = "crio"
app.Usage = "crio server"
app.Version = "0.3"
app.Metadata = map[string]interface{}{
"config": server.DefaultConfig(),
@ -140,7 +140,7 @@ func main() {
},
cli.StringFlag{
Name: "listen",
Usage: "path to ocid socket",
Usage: "path to crio socket",
},
cli.StringFlag{
Name: "log",
@ -166,11 +166,11 @@ func main() {
},
cli.StringFlag{
Name: "root",
Usage: "ocid root dir",
Usage: "crio root dir",
},
cli.StringFlag{
Name: "runroot",
Usage: "ocid state dir",
Usage: "crio state dir",
},
cli.StringFlag{
Name: "storage-driver",
@ -194,7 +194,7 @@ func main() {
},
cli.StringFlag{
Name: "apparmor-profile",
Usage: "default apparmor profile name (default: \"ocid-default\")",
Usage: "default apparmor profile name (default: \"crio-default\")",
},
cli.BoolFlag{
Name: "selinux",


@ -65,8 +65,8 @@ func loadContainerConfig(path string) (*pb.ContainerConfig, error) {
func main() {
app := cli.NewApp()
app.Name = "ocic"
app.Usage = "client for ocid"
app.Name = "crioctl"
app.Usage = "client for crio"
app.Version = "0.3"
app.Commands = []cli.Command{
@ -79,7 +79,7 @@ func main() {
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "connect",
Value: "/var/run/ocid.sock",
Value: "/var/run/crio.sock",
Usage: "Socket to connect to",
},
cli.DurationFlag{


@ -1,8 +1,8 @@
# kpod - Simple debugging tool for pods and images
kpod is a simple client only tool to help with debugging issues when daemons such as CRI runtime and the kubelet are not responding or
failing. A shared API layer could be created to share code between the daemon and kpod. kpod does not require any daemon running. kpod
utilizes the same underlying components that ocid uses i.e. containers/image, container/storage, oci-runtime-tool/generate, runc or
any other OCI compatible runtime. kpod shares state with ocid and so has the capability to debug pods/images created by ocid.
utilizes the same underlying components that crio uses i.e. containers/image, container/storage, oci-runtime-tool/generate, runc or
any other OCI compatible runtime. kpod shares state with crio and so has the capability to debug pods/images created by crio.
## Use cases
1. List pods.


@ -412,7 +412,7 @@ int main(int argc, char *argv[])
char *argv[] = {"sh", "-c", cmd->str, NULL};
/* We only need to touch the stdio if we have terminal=false. */
/* FIXME: This results in us not outputting runc error messages to ocid's log. */
/* FIXME: This results in us not outputting runc error messages to crio's log. */
if (slavefd_stdout >= 0) {
if (dup2(slavefd_stdout, STDOUT_FILENO) < 0)
pexit("Failed to dup over stdout");


@ -1,6 +1,6 @@
{
"cniVersion": "0.2.0",
"name": "ocid-bridge",
"name": "crio-bridge",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,


@ -6,10 +6,10 @@ basis for your own configurations (distributions should package these files in
example directories).
To use these configurations, place them in `/etc/cni/net.d` (or the directory
specified by `ocid.network.network_dir` in your `ocid.conf`).
specified by `crio.network.network_dir` in your `crio.conf`).
In addition, you need to install the [CNI plugins][cni] necessary into
`/opt/cni/bin` (or the directory specified by `ocid.network.plugin_dir`). The
`/opt/cni/bin` (or the directory specified by `crio.network.plugin_dir`). The
two plugins necessary for the example CNI configurations are `loopback` and
`bridge`.
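A hedged sketch of that setup, assuming the example `.conf` files sit next to this README and the plugins were built from containernetworking/cni into a directory referenced here as `$CNI_BUILD_DIR`:

```sh
# install the example network configurations (e.g. the crio-bridge one above)
sudo mkdir -p /etc/cni/net.d /opt/cni/bin
sudo cp ./*.conf /etc/cni/net.d/

# the two plugins needed by the example configurations
sudo cp "$CNI_BUILD_DIR"/bin/{loopback,bridge} /opt/cni/bin/
```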


@ -1,11 +1,11 @@
.PHONY: dist
dist: ocid.spec
spectool -g ocid.spec
dist: crio.spec
spectool -g crio.spec
.PHONY: rpm
rpm: dist
rpmbuild --define "_sourcedir `pwd`" --define "_specdir `pwd`" \
--define "_rpmdir `pwd`" --define "_srcrpmdir `pwd`" -ba ocid.spec
--define "_rpmdir `pwd`" --define "_srcrpmdir `pwd`" -ba crio.spec
all: rpm
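Usage is then a single target; a sketch assuming `spectool` (rpmdevtools) and `rpmbuild` are installed:

```sh
# `rpm` depends on `dist`, so this fetches the tarball referenced by crio.spec
# and builds the source and binary RPMs into the current directory
make rpm
```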


@ -3,7 +3,7 @@
%global provider_tld com
%global project kubernetes-incubator
%global repo cri-o
%global Name ocid
%global Name crio
# https://github.com/kubernetes-incubator/cri-o
%global provider_prefix %{provider}.%{provider_tld}/%{project}/%{repo}
%global import_path %{provider_prefix}
@ -22,10 +22,10 @@ Source0: https://%{provider_prefix}/archive/%{commit}/%{repo}-%{shortcomm
BuildRequires: golang-github-cpuguy83-go-md2man
%description
The ocid package provides an implementation of the
The crio package provides an implementation of the
Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes.
ocid provides following functionalities:
crio provides following functionalities:
Support multiple image formats including the existing Docker image format
Support for multiple means to download images including trust & image verification
@ -47,15 +47,15 @@ make all
#define license tag if not already defined
%{!?_licensedir:%global license %doc}
%files
%{_bindir}/ocid
%{_bindir}/ocic
%{_mandir}/man5/ocid.conf.5*
%{_mandir}/man8/ocid.8*
%{_sysconfdir}/ocid.conf
%dir /%{_libexecdir}/ocid
/%{_libexecdir}/ocid/conmon
/%{_libexecdir}/ocid/pause
%{_unitdir}/ocid.service
%{_bindir}/crio
%{_bindir}/crioctl
%{_mandir}/man5/crio.conf.5*
%{_mandir}/man8/crio.8*
%{_sysconfdir}/crio.conf
%dir /%{_libexecdir}/crio
/%{_libexecdir}/crio/conmon
/%{_libexecdir}/crio/pause
%{_unitdir}/crio.service
%doc README.md
%license LICENSE


@ -1,13 +1,13 @@
[Unit]
Description=Shutdown OCID containers before shutting down the system
Wants=ocid.service
After=ocid.service
Documentation=man:ocid(8)
Wants=crio.service
After=crio.service
Documentation=man:crio(8)
[Service]
Type=oneshot
ExecStart=/usr/bin/true
ExecStop=mkdir -p /var/lib/ocid; touch /var/lib/ocid/ocid.shutdown
ExecStop=mkdir -p /var/lib/crio; touch /var/lib/crio/crio.shutdown
RemainAfterExit=yes
[Install]


@ -5,10 +5,10 @@ After=network.target
[Service]
Type=notify
EnvironmentFile=-/etc/sysconfig/ocid-storage
EnvironmentFile=-/etc/sysconfig/ocid-network
EnvironmentFile=-/etc/sysconfig/crio-storage
EnvironmentFile=-/etc/sysconfig/crio-network
Environment=GOTRACEBACK=crash
ExecStart=/usr/local/bin/ocid \
ExecStart=/usr/local/bin/crio \
$OCID_STORAGE_OPTIONS \
$OCID_NETWORK_OPTIONS \
ExecReload=/bin/kill -s HUP $MAINPID


@ -1,11 +1,11 @@
% ocid(8) Open Container Initiative Daemon
% crio(8) Open Container Initiative Daemon
% Dan Walsh
% SEPTEMBER 2016
# NAME
ocid - Enable OCI Kubernetes Container Runtime daemon
crio - Enable OCI Kubernetes Container Runtime daemon
# SYNOPSIS
**ocid**
**crio**
[**--config**=[*value*]]
[**--conmon**=[*value*]]
[**--debug**]
@ -32,7 +32,7 @@ ocid - Enable OCI Kubernetes Container Runtime daemon
# DESCRIPTION
OCI-based implementation of Kubernetes Container Runtime Interface Daemon
ocid is meant to provide an integration path between OCI conformant runtimes and the kubelet. Specifically, it implements the Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes. The scope of ocid is tied to the scope of the CRI.
crio is meant to provide an integration path between OCI conformant runtimes and the kubelet. Specifically, it implements the Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes. The scope of crio is tied to the scope of the CRI.
* Support multiple image formats including the existing Docker image format
* Support for multiple means to download images including trust & image verification
@ -41,20 +41,20 @@ ocid is meant to provide an integration path between OCI conformant runtimes and
* Monitoring and logging required to satisfy the CRI
* Resource isolation as required by the CRI
**ocid [GLOBAL OPTIONS]**
**crio [GLOBAL OPTIONS]**
**ocid [GLOBAL OPTIONS] config [OPTIONS]**
**crio [GLOBAL OPTIONS] config [OPTIONS]**
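As a hedged illustration of those two forms after the rename (paths and flag values are examples only):

```sh
# generate a default configuration file, then start the daemon with it
crio --config="" config --default > /etc/crio/crio.conf
crio --config /etc/crio/crio.conf --log /var/log/crio.log --debug
```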
# GLOBAL OPTIONS
**--apparmor_profile**=""
Name of the apparmor profile to be used as the runtime's default (default: "ocid-default")
Name of the apparmor profile to be used as the runtime's default (default: "crio-default")
**--config**=""
path to configuration file
**--conmon**=""
path to the conmon executable (default: "/usr/local/libexec/ocid/conmon")
path to the conmon executable (default: "/usr/local/libexec/crio/conmon")
**--debug**
Enable debug output for logging
@ -66,7 +66,7 @@ ocid is meant to provide an integration path between OCI conformant runtimes and
Print usage statement
**--listen**=""
Path to ocid socket (default: "/var/run/ocid.sock")
Path to crio socket (default: "/var/run/crio.sock")
**--log**=""
Set the log file path where internal debug information is written
@ -93,7 +93,7 @@ ocid is meant to provide an integration path between OCI conformant runtimes and
Enable selinux support (default: false)
**--seccomp-profile**=""
Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json")
Path to the seccomp json profile to be used as the runtime's default (default: "/etc/crio/seccomp.json")
**--signature-policy**=""
Path to the signature policy json file (default: "", to use the system-wide default)
@ -130,7 +130,7 @@ it later with **--config**. Global options will modify the output.
Output the default configuration (without taking into account any configuration options).
# SEE ALSO
ocid.conf(5)
crio.conf(5)
# HISTORY
Sept 2016, Originally compiled by Dan Walsh <dwalsh@redhat.com> and Aleksa Sarai <asarai@suse.de>


@ -1,18 +1,18 @@
% ocid.conf(5) Open Container Initiative Daemon
% crio.conf(5) Open Container Initiative Daemon
% Aleksa Sarai
% OCTOBER 2016
# NAME
ocid.conf - Syntax of OCID configuration file
crio.conf - Syntax of OCID configuration file
# DESCRIPTION
The OCID configuration file specifies all of the available command-line options
for the ocid(8) program, but in a TOML format that can be more easily modified
for the crio(8) program, but in a TOML format that can be more easily modified
and versioned.
# FORMAT
The [TOML format][toml] is used as the encoding of the configuration file.
Every option and subtable listed here is nested under a global "ocid" table.
Every option and subtable listed here is nested under a global "crio" table.
No bare options are used. The format of TOML can be simplified to:
[table]
@ -26,7 +26,7 @@ No bare options are used. The format of TOML can be simplified to:
## OCID TABLE
The `ocid` table supports the following options:
The `crio` table supports the following options:
**root**=""
@ -44,12 +44,12 @@ The `ocid` table supports the following options:
## OCID.API TABLE
**listen**=""
Path to ocid socket (default: "/var/run/ocid.sock")
Path to crio socket (default: "/var/run/crio.sock")
## OCID.RUNTIME TABLE
**conmon**=""
Path to the conmon executable (default: "/usr/local/libexec/ocid/conmon")
Path to the conmon executable (default: "/usr/local/libexec/crio/conmon")
**conmon_env**=[]
Environment variable list for conmon process (default: ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",])
@ -64,10 +64,10 @@ The `ocid` table supports the following options:
Path to the signature policy json file (default: "", to use the system-wide default)
**seccomp_profile**=""
Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json")
Path to the seccomp json profile to be used as the runtime's default (default: "/etc/crio/seccomp.json")
**apparmor_profile**=""
Name of the apparmor profile to be used as the runtime's default (default: "ocid-default")
Name of the apparmor profile to be used as the runtime's default (default: "crio-default")
## OCID.IMAGE TABLE
@ -89,7 +89,7 @@ The `ocid` table supports the following options:
Path to CNI plugin binaries (default: "/opt/cni/bin/")
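For orientation, a sketch of a minimal hand-written configuration; the values mirror the compiled-in defaults elsewhere in this commit, and the file is normally generated with `crio --config="" config --default` instead:

```sh
# hypothetical minimal /etc/crio/crio.conf; every value shown is the built-in default
cat > /etc/crio/crio.conf <<'EOF'
[crio]
root = "/var/lib/containers/storage"

[crio.api]
listen = "/var/run/crio.sock"

[crio.runtime]
runtime = "/usr/bin/runc"
conmon = "/usr/local/libexec/crio/conmon"
EOF
```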
# SEE ALSO
ocid(8)
crio(8)
# HISTORY
Oct 2016, Originally compiled by Aleksa Sarai <asarai@suse.de>


@ -36,7 +36,7 @@ pull** IMAGE, before it starts the container from that image.
Launch a pod
# SEE ALSO
kpod(1), ocid(8), ocid.conf(5)
kpod(1), crio(8), crio.conf(5)
# HISTORY
Dec 2016, Originally compiled by Dan Walsh <dwalsh@redhat.com>


@ -13,9 +13,9 @@ kpod is a simple client only tool to help with debugging issues when daemons
such as CRI runtime and the kubelet are not responding or failing. A shared API
layer could be created to share code between the daemon and kpod. kpod does not
require any daemon running. kpod utilizes the same underlying components that
ocid uses i.e. containers/image, container/storage, oci-runtime-tool/generate,
runc or any other OCI compatible runtime. kpod shares state with ocid and so
has the capability to debug pods/images created by ocid.
crio uses i.e. containers/image, container/storage, oci-runtime-tool/generate,
runc or any other OCI compatible runtime. kpod shares state with crio and so
has the capability to debug pods/images created by crio.
**kpod [GLOBAL OPTIONS]**
@ -33,7 +33,7 @@ has the capability to debug pods/images created by ocid.
Launch a pod
# SEE ALSO
ocid(8), ocid.conf(5)
crio(8), crio.conf(5)
# HISTORY
Dec 2016, Originally compiled by Dan Walsh <dwalsh@redhat.com>


@ -11,32 +11,32 @@ so we can keep kubelet running inside container (as well as directly on the host
Below, you can find instructions on how to switch one or more nodes of a running kubernetes cluster from docker to cri-o.
### Preparing ocid
### Preparing crio
You must prepare and install `ocid` on each node you would like to switch. Here's the list of files that must be provided:
You must prepare and install `crio` on each node you would like to switch. Here's the list of files that must be provided:
| File path | Description | Location |
|--------------------------------------------|----------------------------|-----------------------------------------------------|
| `/etc/ocid/ocid.conf` | ocid configuration | Generated on cri-o `make install` |
| `/etc/ocid/seccomp.conf` | seccomp config | Example stored in cri-o repository |
| `/etc/crio/crio.conf` | crio configuration | Generated on cri-o `make install` |
| `/etc/crio/seccomp.conf` | seccomp config | Example stored in cri-o repository |
| `/etc/containers/policy.json` | containers policy | Example stored in cri-o repository |
| `/bin/{ocid, runc}` | `ocid` and `runc` binaries | Built from cri-o repository |
| `/usr/local/libexec/ocid/conmon` | `conmon` binary | Built from cri-o repository |
| `/bin/{crio, runc}` | `crio` and `runc` binaries | Built from cri-o repository |
| `/usr/local/libexec/crio/conmon` | `conmon` binary | Built from cri-o repository |
| `/opt/cni/bin/{flannel, bridge,...}` | CNI plugins binaries | Can be built from sources `containernetworking/cni` |
| `/etc/cni/net.d/10-mynet.conf` | Network config | Example stored in [README file](README.md) |
`ocid` binary can be executed directly on host, inside the container or in any way.
The `crio` binary can be executed directly on the host, inside a container, or in any other way.
However, the recommended way is to run it as a systemd service.
Here's an example unit file:
```
# cat /etc/systemd/system/ocid.service
# cat /etc/systemd/system/crio.service
[Unit]
Description=CRI-O daemon
Documentation=https://github.com/kubernetes-incubator/cri-o
[Service]
ExecStart=/bin/ocid --runtime /bin/runc --log /root/ocid.log --debug
ExecStart=/bin/crio --runtime /bin/runc --log /root/crio.log --debug
Restart=always
RestartSec=10s
@ -55,12 +55,12 @@ and stop all kubelet docker containers that are still running.
# docker stop $(docker ps | grep k8s_ | awk '{print $1}')
```
We have to be sure that `kubelet.service` will start after `ocid.service`.
It can be done by adding `ocid.service` to `Wants=` section in `/etc/systemd/system/kubelet.service`:
We have to be sure that `kubelet.service` will start after `crio.service`.
It can be done by adding `crio.service` to `Wants=` section in `/etc/systemd/system/kubelet.service`:
```
# cat /etc/systemd/system/kubelet.service | grep Wants
Wants=docker.socket ocid.service
Wants=docker.socket crio.service
```
If you'd like to change the way of starting kubelet (e.g. directly on host instead of docker container), you can change it here, but, as mentioned, it's not necessary.
@ -79,7 +79,7 @@ KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests
You need to add following parameters to `KUBELET_ARGS`:
* `--experimental-cri=true` - Use Container Runtime Interface. Will be true by default from kubernetes 1.6 release.
* `--container-runtime=remote` - Use remote runtime with provided socket.
* `--container-runtime-endpoint=/var/run/ocid.sock` - Socket for remote runtime (default `ocid` socket localization).
* `--container-runtime-endpoint=/var/run/crio.sock` - Socket for remote runtime (default `crio` socket location).
* `--runtime-request-timeout=10m` - Optional but useful. Some requests, especially pulling huge images, may take longer than default (2 minutes) and will cause an error.
Kubelet is prepared now.
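For reference, a hedged sketch of the resulting kubelet arguments (the sysconfig file path varies by distribution; the flags mirror the list above):

```sh
# e.g. in the kubelet EnvironmentFile
KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests \
  --experimental-cri=true \
  --container-runtime=remote \
  --container-runtime-endpoint=/var/run/crio.sock \
  --runtime-request-timeout=10m"
```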
@ -96,9 +96,9 @@ If your cluster is using flannel network, your network configuration should be l
Then, kubelet will take parameters from `/run/flannel/subnet.env` - file generated by flannel kubelet microservice.
## Starting kubelet with cri-o
Start ocid first, then kubelet. If you created `ocid` service:
Start crio first, then kubelet. If you created `crio` service:
```
# systemctl start ocid
# systemctl start crio
# systemctl start kubelet
```


@ -146,8 +146,8 @@ func (r *Runtime) CreateContainer(c *Container, cgroupParent string) error {
// Move conmon to specified cgroup
if cgroupParent != "" {
if r.cgroupManager == "systemd" {
logrus.Infof("Running conmon under slice %s and unitName %s", cgroupParent, createUnitName("ocid", c.name))
if err = utils.RunUnderSystemdScope(cmd.Process.Pid, cgroupParent, createUnitName("ocid", c.name)); err != nil {
logrus.Infof("Running conmon under slice %s and unitName %s", cgroupParent, createUnitName("crio", c.name))
if err = utils.RunUnderSystemdScope(cmd.Process.Pid, cgroupParent, createUnitName("crio", c.name)); err != nil {
logrus.Warnf("Failed to add conmon to sandbox cgroup: %v", err)
}
}
@ -282,7 +282,7 @@ func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp
}
}()
logFile, err := ioutil.TempFile("", "ocid-log-"+c.name)
logFile, err := ioutil.TempFile("", "crio-log-"+c.name)
if err != nil {
return nil, ExecSyncError{
ExitCode: -1,


@ -1,5 +1,5 @@
// Package storage provides helper functions for creating and managing CRI pod
// sandboxes and containers and metadata associated with them in the format
// that ocid understands. The API it provides should be considered to be
// that crio understands. The API it provides should be considered to be
// unstable.
package storage


@ -2,7 +2,7 @@ package apparmor
const (
// DefaultApparmorProfile is the name of default apparmor profile name.
DefaultApparmorProfile = "ocid-default"
DefaultApparmorProfile = "crio-default"
// ContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container profile.
ContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/"


@ -10,14 +10,14 @@ import (
// Default paths if none are specified
const (
ocidRoot = "/var/lib/containers/storage"
ocidRunRoot = "/var/run/containers/storage"
conmonPath = "/usr/local/libexec/ocid/conmon"
crioRoot = "/var/lib/containers/storage"
crioRunRoot = "/var/run/containers/storage"
conmonPath = "/usr/local/libexec/crio/conmon"
pauseImage = "kubernetes/pause"
pauseCommand = "/pause"
defaultTransport = "docker://"
seccompProfilePath = "/etc/ocid/seccomp.json"
apparmorProfileName = "ocid-default"
seccompProfilePath = "/etc/crio/seccomp.json"
apparmorProfileName = "crio-default"
cniConfigDir = "/etc/cni/net.d/"
cniBinDir = "/opt/cni/bin/"
cgroupManager = "cgroupfs"
@ -37,7 +37,7 @@ type Config struct {
// while also not requiring a bunch of layered structs for no good
// reason.
// RootConfig represents the root of the "ocid" TOML config table.
// RootConfig represents the root of the "crio" TOML config table.
type RootConfig struct {
// Root is a path to the "root directory" where data not
// explicitly handled by other options will be stored.
@ -59,7 +59,7 @@ type RootConfig struct {
LogDir string `toml:"log_dir"`
}
// APIConfig represents the "ocid.api" TOML config table.
// APIConfig represents the "crio.api" TOML config table.
type APIConfig struct {
// Listen is the path to the AF_LOCAL socket on which cri-o will listen.
// This may support proto://addr formats later, but currently this is just
@ -67,14 +67,14 @@ type APIConfig struct {
Listen string `toml:"listen"`
}
// RuntimeConfig represents the "ocid.runtime" TOML config table.
// RuntimeConfig represents the "crio.runtime" TOML config table.
type RuntimeConfig struct {
// Runtime is a path to the OCI runtime which ocid will be using. Currently
// Runtime is a path to the OCI runtime which crio will be using. Currently
// the only known working choice is runC, simply because the OCI has not
// yet merged a CLI API (so we assume runC's API here).
Runtime string `toml:"runtime"`
// RuntimeHostPrivileged is a path to the OCI runtime which ocid will be
// RuntimeHostPrivileged is a path to the OCI runtime which crio will be
// using for host privileged operations.
RuntimeHostPrivileged string `toml:"runtime_host_privileged"`
@ -100,7 +100,7 @@ type RuntimeConfig struct {
CgroupManager string `toml:"cgroup_manager"`
}
// ImageConfig represents the "ocid.image" TOML config table.
// ImageConfig represents the "crio.image" TOML config table.
type ImageConfig struct {
// DefaultTransport is a value we prefix to image names that fail to
// validate source references.
@ -119,7 +119,7 @@ type ImageConfig struct {
SignaturePolicyPath string `toml:"signature_policy"`
}
// NetworkConfig represents the "ocid.network" TOML config table
// NetworkConfig represents the "crio.network" TOML config table
type NetworkConfig struct {
// NetworkDir is where CNI network configuration files are stored.
NetworkDir string `toml:"network_dir"`
@ -138,7 +138,7 @@ type tomlConfig struct {
Runtime struct{ RuntimeConfig } `toml:"runtime"`
Image struct{ ImageConfig } `toml:"image"`
Network struct{ NetworkConfig } `toml:"network"`
} `toml:"ocid"`
} `toml:"crio"`
}
func (t *tomlConfig) toConfig(c *Config) {
@ -195,16 +195,16 @@ func (c *Config) ToFile(path string) error {
return ioutil.WriteFile(path, w.Bytes(), 0644)
}
// DefaultConfig returns the default configuration for ocid.
// DefaultConfig returns the default configuration for crio.
func DefaultConfig() *Config {
return &Config{
RootConfig: RootConfig{
Root: ocidRoot,
RunRoot: ocidRunRoot,
LogDir: "/var/log/ocid/pods",
Root: crioRoot,
RunRoot: crioRunRoot,
LogDir: "/var/log/crio/pods",
},
APIConfig: APIConfig{
Listen: "/var/run/ocid.sock",
Listen: "/var/run/crio.sock",
},
RuntimeConfig: RuntimeConfig{
Runtime: "/usr/bin/runc",


@ -425,7 +425,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
if sb.cgroupParent != "" {
if s.config.CgroupManager == "systemd" {
cgPath := sb.cgroupParent + ":" + "ocid" + ":" + containerID
cgPath := sb.cgroupParent + ":" + "crio" + ":" + containerID
specgen.SetLinuxCgroupsPath(cgPath)
} else {
specgen.SetLinuxCgroupsPath(sb.cgroupParent + "/" + containerID)
@ -532,31 +532,31 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
specgen.SetHostname(sb.hostname)
}
specgen.AddAnnotation("ocid/name", containerName)
specgen.AddAnnotation("ocid/sandbox_id", sb.id)
specgen.AddAnnotation("ocid/sandbox_name", sb.infraContainer.Name())
specgen.AddAnnotation("ocid/container_type", containerTypeContainer)
specgen.AddAnnotation("ocid/log_path", logPath)
specgen.AddAnnotation("ocid/tty", fmt.Sprintf("%v", containerConfig.Tty))
specgen.AddAnnotation("ocid/image", image)
specgen.AddAnnotation("crio/name", containerName)
specgen.AddAnnotation("crio/sandbox_id", sb.id)
specgen.AddAnnotation("crio/sandbox_name", sb.infraContainer.Name())
specgen.AddAnnotation("crio/container_type", containerTypeContainer)
specgen.AddAnnotation("crio/log_path", logPath)
specgen.AddAnnotation("crio/tty", fmt.Sprintf("%v", containerConfig.Tty))
specgen.AddAnnotation("crio/image", image)
metadataJSON, err := json.Marshal(metadata)
if err != nil {
return nil, err
}
specgen.AddAnnotation("ocid/metadata", string(metadataJSON))
specgen.AddAnnotation("crio/metadata", string(metadataJSON))
labelsJSON, err := json.Marshal(labels)
if err != nil {
return nil, err
}
specgen.AddAnnotation("ocid/labels", string(labelsJSON))
specgen.AddAnnotation("crio/labels", string(labelsJSON))
annotationsJSON, err := json.Marshal(annotations)
if err != nil {
return nil, err
}
specgen.AddAnnotation("ocid/annotations", string(annotationsJSON))
specgen.AddAnnotation("crio/annotations", string(annotationsJSON))
if err = s.setupSeccomp(&specgen, containerName, sb.annotations); err != nil {
return nil, err


@ -252,20 +252,20 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
}
privileged := s.privilegedSandbox(req)
g.AddAnnotation("ocid/metadata", string(metadataJSON))
g.AddAnnotation("ocid/labels", string(labelsJSON))
g.AddAnnotation("ocid/annotations", string(annotationsJSON))
g.AddAnnotation("ocid/log_path", logPath)
g.AddAnnotation("ocid/name", name)
g.AddAnnotation("ocid/container_type", containerTypeSandbox)
g.AddAnnotation("ocid/sandbox_id", id)
g.AddAnnotation("ocid/container_name", containerName)
g.AddAnnotation("ocid/container_id", id)
g.AddAnnotation("ocid/shm_path", shmPath)
g.AddAnnotation("ocid/privileged_runtime", fmt.Sprintf("%v", privileged))
g.AddAnnotation("ocid/resolv_path", resolvPath)
g.AddAnnotation("ocid/hostname", hostname)
g.AddAnnotation("ocid/kube_name", kubeName)
g.AddAnnotation("crio/metadata", string(metadataJSON))
g.AddAnnotation("crio/labels", string(labelsJSON))
g.AddAnnotation("crio/annotations", string(annotationsJSON))
g.AddAnnotation("crio/log_path", logPath)
g.AddAnnotation("crio/name", name)
g.AddAnnotation("crio/container_type", containerTypeSandbox)
g.AddAnnotation("crio/sandbox_id", id)
g.AddAnnotation("crio/container_name", containerName)
g.AddAnnotation("crio/container_id", id)
g.AddAnnotation("crio/shm_path", shmPath)
g.AddAnnotation("crio/privileged_runtime", fmt.Sprintf("%v", privileged))
g.AddAnnotation("crio/resolv_path", resolvPath)
g.AddAnnotation("crio/hostname", hostname)
g.AddAnnotation("crio/kube_name", kubeName)
sb := &sandbox{
id: id,
@ -319,7 +319,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
cgroupParent := req.GetConfig().GetLinux().CgroupParent
if cgroupParent != "" {
if s.config.CgroupManager == "systemd" {
cgPath := cgroupParent + ":" + "ocid" + ":" + id
cgPath := cgroupParent + ":" + "crio" + ":" + id
g.SetLinuxCgroupsPath(cgPath)
} else {


@ -26,7 +26,7 @@ import (
const (
runtimeAPIVersion = "v1alpha1"
shutdownFile = "/var/lib/ocid/ocid.shutdown"
shutdownFile = "/var/lib/crio/crio.shutdown"
)
// streamService implements streaming.Runtime.
@ -87,10 +87,10 @@ func (s *Server) loadContainer(id string) error {
return err
}
labels := make(map[string]string)
if err = json.Unmarshal([]byte(m.Annotations["ocid/labels"]), &labels); err != nil {
if err = json.Unmarshal([]byte(m.Annotations["crio/labels"]), &labels); err != nil {
return err
}
name := m.Annotations["ocid/name"]
name := m.Annotations["crio/name"]
name, err = s.reserveContainerName(id, name)
if err != nil {
return err
@ -103,16 +103,16 @@ func (s *Server) loadContainer(id string) error {
}()
var metadata pb.ContainerMetadata
if err = json.Unmarshal([]byte(m.Annotations["ocid/metadata"]), &metadata); err != nil {
if err = json.Unmarshal([]byte(m.Annotations["crio/metadata"]), &metadata); err != nil {
return err
}
sb := s.getSandbox(m.Annotations["ocid/sandbox_id"])
sb := s.getSandbox(m.Annotations["crio/sandbox_id"])
if sb == nil {
return fmt.Errorf("could not get sandbox with id %s, skipping", m.Annotations["ocid/sandbox_id"])
return fmt.Errorf("could not get sandbox with id %s, skipping", m.Annotations["crio/sandbox_id"])
}
var tty bool
if v := m.Annotations["ocid/tty"]; v == "true" {
if v := m.Annotations["crio/tty"]; v == "true" {
tty = true
}
containerPath, err := s.store.GetContainerRunDirectory(id)
@ -121,7 +121,7 @@ func (s *Server) loadContainer(id string) error {
}
var img *pb.ImageSpec
image, ok := m.Annotations["ocid/image"]
image, ok := m.Annotations["crio/image"]
if ok {
img = &pb.ImageSpec{
Image: image,
@ -129,11 +129,11 @@ func (s *Server) loadContainer(id string) error {
}
annotations := make(map[string]string)
if err = json.Unmarshal([]byte(m.Annotations["ocid/annotations"]), &annotations); err != nil {
if err = json.Unmarshal([]byte(m.Annotations["crio/annotations"]), &annotations); err != nil {
return err
}
ctr, err := oci.NewContainer(id, name, containerPath, m.Annotations["ocid/log_path"], sb.netNs(), labels, annotations, img, &metadata, sb.id, tty, sb.privileged)
ctr, err := oci.NewContainer(id, name, containerPath, m.Annotations["crio/log_path"], sb.netNs(), labels, annotations, img, &metadata, sb.id, tty, sb.privileged)
if err != nil {
return err
}
@ -170,10 +170,10 @@ func (s *Server) loadSandbox(id string) error {
return err
}
labels := make(map[string]string)
if err = json.Unmarshal([]byte(m.Annotations["ocid/labels"]), &labels); err != nil {
if err = json.Unmarshal([]byte(m.Annotations["crio/labels"]), &labels); err != nil {
return err
}
name := m.Annotations["ocid/name"]
name := m.Annotations["crio/name"]
name, err = s.reservePodName(id, name)
if err != nil {
return err
@ -184,7 +184,7 @@ func (s *Server) loadSandbox(id string) error {
}
}()
var metadata pb.PodSandboxMetadata
if err = json.Unmarshal([]byte(m.Annotations["ocid/metadata"]), &metadata); err != nil {
if err = json.Unmarshal([]byte(m.Annotations["crio/metadata"]), &metadata); err != nil {
return err
}
@ -194,26 +194,26 @@ func (s *Server) loadSandbox(id string) error {
}
annotations := make(map[string]string)
if err = json.Unmarshal([]byte(m.Annotations["ocid/annotations"]), &annotations); err != nil {
if err = json.Unmarshal([]byte(m.Annotations["crio/annotations"]), &annotations); err != nil {
return err
}
privileged := m.Annotations["ocid/privileged_runtime"] == "true"
privileged := m.Annotations["crio/privileged_runtime"] == "true"
sb := &sandbox{
id: id,
name: name,
kubeName: m.Annotations["ocid/kube_name"],
logDir: filepath.Dir(m.Annotations["ocid/log_path"]),
kubeName: m.Annotations["crio/kube_name"],
logDir: filepath.Dir(m.Annotations["crio/log_path"]),
labels: labels,
containers: oci.NewMemoryStore(),
processLabel: processLabel,
mountLabel: mountLabel,
annotations: annotations,
metadata: &metadata,
shmPath: m.Annotations["ocid/shm_path"],
shmPath: m.Annotations["crio/shm_path"],
privileged: privileged,
resolvPath: m.Annotations["ocid/resolv_path"],
resolvPath: m.Annotations["crio/resolv_path"],
}
// We add a netNS only if we can load a permanent one.
@ -244,7 +244,7 @@ func (s *Server) loadSandbox(id string) error {
return err
}
cname, err := s.reserveContainerName(m.Annotations["ocid/container_id"], m.Annotations["ocid/container_name"])
cname, err := s.reserveContainerName(m.Annotations["crio/container_id"], m.Annotations["crio/container_name"])
if err != nil {
return err
}
@ -254,7 +254,7 @@ func (s *Server) loadSandbox(id string) error {
}
}()
scontainer, err := oci.NewContainer(m.Annotations["ocid/container_id"], cname, sandboxPath, m.Annotations["ocid/log_path"], sb.netNs(), labels, annotations, nil, nil, id, false, privileged)
scontainer, err := oci.NewContainer(m.Annotations["crio/container_id"], cname, sandboxPath, m.Annotations["crio/log_path"], sb.netNs(), labels, annotations, nil, nil, id, false, privileged)
if err != nil {
return err
}


@ -79,7 +79,7 @@ make localintegration RUNTIME=cc-oci-runtime
## Writing integration tests
[Helper functions]
(https://github.com/kubernetes-incubator/ocid/blob/master/test/helpers.bash)
(https://github.com/kubernetes-incubator/crio/blob/master/test/helpers.bash)
are provided in order to facilitate writing tests.
```sh
@ -97,9 +97,9 @@ function teardown() {
cleanup_test
}
@test "ocic runtimeversion" {
start_ocid
ocic runtimeversion
@test "crioctl runtimeversion" {
start_crio
crioctl runtimeversion
[ "$status" -eq 0 ]
}


@ -15,31 +15,31 @@ function teardown() {
skip "skip this test since apparmor is not enabled."
fi
start_ocid
start_crio
sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname1": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor1.json
run ocic pod run --name apparmor1 --config "$TESTDIR"/apparmor1.json
run crioctl pod run --name apparmor1 --config "$TESTDIR"/apparmor1.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname1 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --name testname1 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" touch test.txt
run crioctl ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
# 2. test running with loading a specific apparmor profile as ocid default apparmor profile.
# test that we can run with a specific apparmor profile which will block touching a file in `.` as ocid default apparmor profile.
# 2. test running with loading a specific apparmor profile as crio default apparmor profile.
# test that we can run with a specific apparmor profile which will block touching a file in `.` as crio default apparmor profile.
@test "load a specific apparmor profile as default apparmor and run a container with it" {
# this test requires apparmor, so skip this test if apparmor is not enabled.
enabled=$(is_apparmor_enabled)
@ -48,31 +48,31 @@ function teardown() {
fi
load_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
start_ocid "" "$APPARMOR_TEST_PROFILE_NAME"
start_crio "" "$APPARMOR_TEST_PROFILE_NAME"
sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname2": "apparmor-test-deny-write"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor2.json
run ocic pod run --name apparmor2 --config "$TESTDIR"/apparmor2.json
run crioctl pod run --name apparmor2 --config "$TESTDIR"/apparmor2.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" touch test.txt
run crioctl ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -ne 0 ]
[[ "$output" =~ "Permission denied" ]]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
remove_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
}
# 3. test running with loading a specific apparmor profile but not as ocid default apparmor profile.
# 3. test running with loading a specific apparmor profile but not as crio default apparmor profile.
# test that we can run with a specific apparmor profile which will block touching a file in `.`
@test "load default apparmor profile and run a container with another apparmor profile" {
# this test requires apparmor, so skip this test if apparmor is not enabled.
@ -82,27 +82,27 @@ function teardown() {
fi
load_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
start_ocid
start_crio
sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname3": "apparmor-test-deny-write"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor3.json
run ocic pod run --name apparmor3 --config "$TESTDIR"/apparmor3.json
run crioctl pod run --name apparmor3 --config "$TESTDIR"/apparmor3.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname3 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --name testname3 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" touch test.txt
run crioctl ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -ne 0 ]
[[ "$output" =~ "Permission denied" ]]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
remove_apparmor_profile "$APPARMOR_TEST_PROFILE_PATH"
}
@ -115,15 +115,15 @@ function teardown() {
skip "skip this test since apparmor is not enabled."
fi
start_ocid
start_crio
sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname4": "not-exists"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor4.json
run ocic pod run --name apparmor4 --config "$TESTDIR"/apparmor4.json
run crioctl pod run --name apparmor4 --config "$TESTDIR"/apparmor4.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname4 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --name testname4 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -ne 0 ]
[[ "$output" =~ "Creating container failed" ]]
@ -131,7 +131,7 @@ function teardown() {
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
# 5. test running with default apparmor profile unloaded.
@ -143,26 +143,26 @@ function teardown() {
skip "skip this test since apparmor is not enabled."
fi
start_ocid
start_crio
remove_apparmor_profile "$FAKE_OCID_DEFAULT_PROFILE_PATH"
sed -e 's/%VALUE%/,"container\.apparmor\.security\.beta\.kubernetes\.io\/testname5": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/apparmor5.json
run ocic pod run --name apparmor5 --config "$TESTDIR"/apparmor5.json
run crioctl pod run --name apparmor5 --config "$TESTDIR"/apparmor5.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname5 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --name testname5 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" touch test.txt
run crioctl ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}


@ -7,125 +7,125 @@ function teardown() {
}
@test "ctr remove" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr remove --id "$ctr_id"
run crioctl ctr remove --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod stop --id "$pod_id"
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod remove --id "$pod_id"
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr lifecycle" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic pod list
run crioctl pod list
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr list
run crioctl ctr list
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr status --id "$ctr_id"
run crioctl ctr status --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr status --id "$ctr_id"
run crioctl ctr status --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr list
run crioctl ctr list
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr stop --id "$ctr_id"
run crioctl ctr stop --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr status --id "$ctr_id"
run crioctl ctr status --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr list
run crioctl ctr list
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr remove --id "$ctr_id"
run crioctl ctr remove --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr list
run crioctl ctr list
echo "$output"
[ "$status" -eq 0 ]
run ocic pod stop --id "$pod_id"
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod list
run crioctl pod list
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr list
run crioctl ctr list
echo "$output"
[ "$status" -eq 0 ]
run ocic pod remove --id "$pod_id"
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod list
run crioctl pod list
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr list
run crioctl ctr list
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr logging" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic pod list
run crioctl pod list
echo "$output"
[ "$status" -eq 0 ]
# Create a new container.
newconfig=$(mktemp --tmpdir ocid-config.XXXXXX.json)
newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json)
cp "$TESTDATA"/container_config_logging.json "$newconfig"
sed -i 's|"%shellcommand%"|"echo here is some output \&\& echo and some from stderr >\&2"|' "$newconfig"
run ocic ctr create --config "$newconfig" --pod "$pod_id"
run crioctl ctr create --config "$newconfig" --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr stop --id "$ctr_id"
run crioctl ctr stop --id "$ctr_id"
echo "$output"
# Ignore errors on stop.
run ocic ctr status --id "$ctr_id"
run crioctl ctr status --id "$ctr_id"
[ "$status" -eq 0 ]
run ocic ctr remove --id "$ctr_id"
run crioctl ctr remove --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
@ -136,46 +136,46 @@ function teardown() {
grep -E "^[^\n]+ stdout here is some output$" "$logpath"
grep -E "^[^\n]+ stderr and some from stderr$" "$logpath"
run ocic pod stop --id "$pod_id"
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod remove --id "$pod_id"
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr logging [tty=true]" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic pod list
run crioctl pod list
echo "$output"
[ "$status" -eq 0 ]
# Create a new container.
newconfig=$(mktemp --tmpdir ocid-config.XXXXXX.json)
newconfig=$(mktemp --tmpdir crio-config.XXXXXX.json)
cp "$TESTDATA"/container_config_logging.json "$newconfig"
sed -i 's|"%shellcommand%"|"echo here is some output"|' "$newconfig"
sed -i 's|"tty": false,|"tty": true,|' "$newconfig"
run ocic ctr create --config "$newconfig" --pod "$pod_id"
run crioctl ctr create --config "$newconfig" --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr stop --id "$ctr_id"
run crioctl ctr stop --id "$ctr_id"
echo "$output"
# Ignore errors on stop.
run ocic ctr status --id "$ctr_id"
run crioctl ctr status --id "$ctr_id"
[ "$status" -eq 0 ]
run ocic ctr remove --id "$ctr_id"
run crioctl ctr remove --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
@ -185,211 +185,211 @@ function teardown() {
echo "$logpath :: $(cat "$logpath")"
grep -E "^[^\n]+ stdout here is some output$" "$logpath"
run ocic pod stop --id "$pod_id"
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod remove --id "$pod_id"
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
# regression test for #127
@test "ctrs status for a pod" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr list --quiet
run crioctl ctr list --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "${output}" != "" ]]
printf '%s\n' "$output" | while IFS= read -r id
do
run ocic ctr status --id "$id"
run crioctl ctr status --id "$id"
echo "$output"
[ "$status" -eq 0 ]
done
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr list filtering" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json --name pod1
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod1
echo "$output"
[ "$status" -eq 0 ]
pod1_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod1_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod1_id"
echo "$output"
[ "$status" -eq 0 ]
ctr1_id="$output"
run ocic ctr start --id "$ctr1_id"
run crioctl ctr start --id "$ctr1_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod run --config "$TESTDATA"/sandbox_config.json --name pod2
run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod2
echo "$output"
[ "$status" -eq 0 ]
pod2_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod2_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod2_id"
echo "$output"
[ "$status" -eq 0 ]
ctr2_id="$output"
run ocic pod run --config "$TESTDATA"/sandbox_config.json --name pod3
run crioctl pod run --config "$TESTDATA"/sandbox_config.json --name pod3
echo "$output"
[ "$status" -eq 0 ]
pod3_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod3_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod3_id"
echo "$output"
[ "$status" -eq 0 ]
ctr3_id="$output"
run ocic ctr start --id "$ctr3_id"
run crioctl ctr start --id "$ctr3_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr stop --id "$ctr3_id"
run crioctl ctr stop --id "$ctr3_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr list --id "$ctr1_id" --quiet
run crioctl ctr list --id "$ctr1_id" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr1_id" ]]
run ocic ctr list --id "${ctr1_id:0:4}" --quiet
run crioctl ctr list --id "${ctr1_id:0:4}" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr1_id" ]]
run ocic ctr list --id "$ctr2_id" --pod "$pod2_id" --quiet
run crioctl ctr list --id "$ctr2_id" --pod "$pod2_id" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr2_id" ]]
run ocic ctr list --id "$ctr2_id" --pod "$pod3_id" --quiet
run crioctl ctr list --id "$ctr2_id" --pod "$pod3_id" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" == "" ]]
run ocic ctr list --state created --quiet
run crioctl ctr list --state created --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr2_id" ]]
run ocic ctr list --state running --quiet
run crioctl ctr list --state running --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr1_id" ]]
run ocic ctr list --state stopped --quiet
run crioctl ctr list --state stopped --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr3_id" ]]
run ocic ctr list --pod "$pod1_id" --quiet
run crioctl ctr list --pod "$pod1_id" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr1_id" ]]
run ocic ctr list --pod "$pod2_id" --quiet
run crioctl ctr list --pod "$pod2_id" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr2_id" ]]
run ocic ctr list --pod "$pod3_id" --quiet
run crioctl ctr list --pod "$pod3_id" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr3_id" ]]
run ocic pod remove --id "$pod1_id"
run crioctl pod remove --id "$pod1_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod remove --id "$pod2_id"
run crioctl pod remove --id "$pod2_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod remove --id "$pod3_id"
run crioctl pod remove --id "$pod3_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr list label filtering" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr1 --label "a=b" --label "c=d" --label "e=f"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr1 --label "a=b" --label "c=d" --label "e=f"
echo "$output"
[ "$status" -eq 0 ]
ctr1_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr2 --label "a=b" --label "c=d"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr2 --label "a=b" --label "c=d"
echo "$output"
[ "$status" -eq 0 ]
ctr2_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr3 --label "a=b"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id" --name ctr3 --label "a=b"
echo "$output"
[ "$status" -eq 0 ]
ctr3_id="$output"
run ocic ctr list --label "tier=backend" --label "a=b" --label "c=d" --label "e=f" --quiet
run crioctl ctr list --label "tier=backend" --label "a=b" --label "c=d" --label "e=f" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr1_id" ]]
run ocic ctr list --label "tier=frontend" --quiet
run crioctl ctr list --label "tier=frontend" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" == "" ]]
run ocic ctr list --label "a=b" --label "c=d" --quiet
run crioctl ctr list --label "a=b" --label "c=d" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr1_id" ]]
[[ "$output" =~ "$ctr2_id" ]]
run ocic ctr list --label "a=b" --quiet
run crioctl ctr list --label "a=b" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$ctr1_id" ]]
[[ "$output" =~ "$ctr2_id" ]]
[[ "$output" =~ "$ctr3_id" ]]
run ocic pod remove --id "$pod_id"
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr metadata in list & status" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr list --id "$ctr_id"
run crioctl ctr list --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
	# TODO: expected value should not be hard-coded here
[[ "$output" =~ "Name: container1" ]]
[[ "$output" =~ "Attempt: 1" ]]
run ocic ctr status --id "$ctr_id"
run crioctl ctr status --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
	# TODO: expected value should not be hard-coded here
@ -398,163 +398,163 @@ function teardown() {
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr execsync" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" echo HELLO
run crioctl ctr execsync --id "$ctr_id" echo HELLO
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "HELLO" ]]
run ocic ctr execsync --id "$ctr_id" --timeout 1 sleep 10
run crioctl ctr execsync --id "$ctr_id" --timeout 1 sleep 10
echo "$output"
[[ "$output" =~ "command timed out" ]]
run ocic pod remove --id "$pod_id"
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr device add" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis_device.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis_device.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" ls /dev/mynull
run crioctl ctr execsync --id "$ctr_id" ls /dev/mynull
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "/dev/mynull" ]]
run ocic pod remove --id "$pod_id"
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr execsync failure" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" doesnotexist
run crioctl ctr execsync --id "$ctr_id" doesnotexist
echo "$output"
[ "$status" -ne 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr execsync exit code" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" false
run crioctl ctr execsync --id "$ctr_id" false
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "Exit code: 1" ]]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr execsync std{out,err}" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" "echo hello0 stdout"
run crioctl ctr execsync --id "$ctr_id" "echo hello0 stdout"
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" == *"$(printf "Stdout:\nhello0 stdout")"* ]]
run ocic ctr execsync --id "$ctr_id" "echo hello1 stderr >&2"
run crioctl ctr execsync --id "$ctr_id" "echo hello1 stderr >&2"
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" == *"$(printf "Stderr:\nhello1 stderr")"* ]]
run ocic ctr execsync --id "$ctr_id" "echo hello2 stderr >&2; echo hello3 stdout"
run crioctl ctr execsync --id "$ctr_id" "echo hello2 stderr >&2; echo hello3 stdout"
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" == *"$(printf "Stderr:\nhello2 stderr")"* ]]
[[ "$output" == *"$(printf "Stdout:\nhello3 stdout")"* ]]
run ocic pod remove --id "$pod_id"
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr stop idempotent" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr stop --id "$ctr_id"
run crioctl ctr stop --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr stop --id "$ctr_id"
run crioctl ctr stop --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "ctr caps drop" {

View file

@ -9,10 +9,10 @@ TESTDATA="${INTEGRATION_ROOT}/testdata"
# Root directory of the repository.
OCID_ROOT=${OCID_ROOT:-$(cd "$INTEGRATION_ROOT/../.."; pwd -P)}
# Path of the ocid binary.
OCID_BINARY=${OCID_BINARY:-${OCID_ROOT}/cri-o/ocid}
# Path of the ocic binary.
OCIC_BINARY=${OCIC_BINARY:-${OCID_ROOT}/cri-o/ocic}
# Path of the crio binary.
OCID_BINARY=${OCID_BINARY:-${OCID_ROOT}/cri-o/crio}
# Path of the crioctl binary.
OCIC_BINARY=${OCIC_BINARY:-${OCID_ROOT}/cri-o/crioctl}
# Path of the conmon binary.
CONMON_BINARY=${CONMON_BINARY:-${OCID_ROOT}/cri-o/conmon/conmon}
# Path of the pause binary.
@ -20,7 +20,7 @@ PAUSE_BINARY=${PAUSE_BINARY:-${OCID_ROOT}/cri-o/pause/pause}
# Path of the default seccomp profile.
SECCOMP_PROFILE=${SECCOMP_PROFILE:-${OCID_ROOT}/cri-o/seccomp.json}
# Name of the default apparmor profile.
APPARMOR_PROFILE=${APPARMOR_PROFILE:-ocid-default}
APPARMOR_PROFILE=${APPARMOR_PROFILE:-crio-default}
# Runtime
RUNTIME=${RUNTIME:-runc}
RUNTIME_PATH=$(command -v $RUNTIME || true)
@ -29,8 +29,8 @@ RUNTIME_BINARY=${RUNTIME_PATH:-/usr/local/sbin/runc}
APPARMOR_PARSER_BINARY=${APPARMOR_PARSER_BINARY:-/sbin/apparmor_parser}
# Path of the apparmor profile for test.
APPARMOR_TEST_PROFILE_PATH=${APPARMOR_TEST_PROFILE_PATH:-${TESTDATA}/apparmor_test_deny_write}
# Path of the apparmor profile for unloading ocid-default.
FAKE_OCID_DEFAULT_PROFILE_PATH=${FAKE_OCID_DEFAULT_PROFILE_PATH:-${TESTDATA}/fake_ocid_default}
# Path of the apparmor profile for unloading crio-default.
FAKE_OCID_DEFAULT_PROFILE_PATH=${FAKE_OCID_DEFAULT_PROFILE_PATH:-${TESTDATA}/fake_crio_default}
# Name of the apparmor profile for test.
APPARMOR_TEST_PROFILE_NAME=${APPARMOR_TEST_PROFILE_NAME:-apparmor-test-deny-write}
# Path of boot config.
@ -46,7 +46,7 @@ ARTIFACTS_PATH=${ARTIFACTS_PATH:-${OCID_ROOT}/cri-o/.artifacts}
# Path of the checkseccomp binary.
CHECKSECCOMP_BINARY=${CHECKSECCOMP_BINARY:-${OCID_ROOT}/cri-o/test/checkseccomp/checkseccomp}
# XXX: This is hardcoded inside cri-o at the moment.
DEFAULT_LOG_PATH=/var/log/ocid/pods
DEFAULT_LOG_PATH=/var/log/crio/pods
TESTDIR=$(mktemp -d)
if [ -e /usr/sbin/selinuxenabled ] && /usr/sbin/selinuxenabled; then
@ -54,8 +54,8 @@ if [ -e /usr/sbin/selinuxenabled ] && /usr/sbin/selinuxenabled; then
filelabel=$(awk -F'"' '/^file.*=.*/ {print $2}' /etc/selinux/${SELINUXTYPE}/contexts/lxc_contexts)
chcon -R ${filelabel} $TESTDIR
fi
OCID_SOCKET="$TESTDIR/ocid.sock"
OCID_CONFIG="$TESTDIR/ocid.conf"
OCID_SOCKET="$TESTDIR/crio.sock"
OCID_CONFIG="$TESTDIR/crio.conf"
OCID_CNI_CONFIG="$TESTDIR/cni/net.d/"
OCID_CNI_PLUGIN="/opt/cni/bin/"
POD_CIDR="10.88.0.0/16"
@ -85,14 +85,14 @@ if ! [ -d "$ARTIFACTS_PATH"/busybox-image ]; then
fi
fi
# Run ocid using the binary specified by $OCID_BINARY.
# This must ONLY be run on engines created with `start_ocid`.
function ocid() {
# Run crio using the binary specified by $OCID_BINARY.
# This must ONLY be run on engines created with `start_crio`.
function crio() {
"$OCID_BINARY" --listen "$OCID_SOCKET" "$@"
}
# Run ocic using the binary specified by $OCIC_BINARY.
function ocic() {
# Run crioctl using the binary specified by $OCIC_BINARY.
function crioctl() {
"$OCIC_BINARY" --connect "$OCID_SOCKET" "$@"
}
@ -122,13 +122,13 @@ function retry() {
false
}
# Waits until the given ocid becomes reachable.
# Waits until the given crio becomes reachable.
function wait_until_reachable() {
retry 15 1 ocic runtimeversion
retry 15 1 crioctl runtimeversion
}
# Start ocid.
function start_ocid() {
# Start crio.
function start_crio() {
if [[ -n "$1" ]]; then
seccomp="$1"
else
@ -141,12 +141,12 @@ function start_ocid() {
apparmor="$APPARMOR_PROFILE"
fi
# Don't forget: bin2img, copyimg, and ocid have their own default drivers, so if you override any, you probably need to override them all
# Don't forget: bin2img, copyimg, and crio have their own default drivers, so if you override any, you probably need to override them all
if ! [ "$3" = "--no-pause-image" ] ; then
"$BIN2IMG_BINARY" --root "$TESTDIR/ocid" $STORAGE_OPTS --runroot "$TESTDIR/ocid-run" --source-binary "$PAUSE_BINARY"
"$BIN2IMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --source-binary "$PAUSE_BINARY"
fi
"$COPYIMG_BINARY" --root "$TESTDIR/ocid" $STORAGE_OPTS --runroot "$TESTDIR/ocid-run" --image-name=redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --add-name=docker://docker.io/library/redis:alpine --signature-policy="$INTEGRATION_ROOT"/policy.json
"$OCID_BINARY" --conmon "$CONMON_BINARY" --listen "$OCID_SOCKET" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/ocid" --runroot "$TESTDIR/ocid-run" $STORAGE_OPTS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$OCID_CNI_CONFIG" --signature-policy "$INTEGRATION_ROOT"/policy.json --config /dev/null config >$OCID_CONFIG
"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --add-name=docker://docker.io/library/redis:alpine --signature-policy="$INTEGRATION_ROOT"/policy.json
"$OCID_BINARY" --conmon "$CONMON_BINARY" --listen "$OCID_SOCKET" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$OCID_CNI_CONFIG" --signature-policy "$INTEGRATION_ROOT"/policy.json --config /dev/null config >$OCID_CONFIG
# Prepare the CNI configuration files; we're running with non-host networking by default
if [[ -n "$4" ]]; then
@ -159,58 +159,58 @@ function start_ocid() {
"$OCID_BINARY" --debug --config "$OCID_CONFIG" & OCID_PID=$!
wait_until_reachable
run ocic image status --id=redis:alpine
run crioctl image status --id=redis:alpine
if [ "$status" -ne 0 ] ; then
ocic image pull redis:alpine
crioctl image pull redis:alpine
fi
REDIS_IMAGEID=$(ocic image status --id=redis:alpine | head -1 | sed -e "s/ID: //g")
run ocic image status --id=busybox
REDIS_IMAGEID=$(crioctl image status --id=redis:alpine | head -1 | sed -e "s/ID: //g")
run crioctl image status --id=busybox
if [ "$status" -ne 0 ] ; then
ocic image pull busybox:latest
crioctl image pull busybox:latest
fi
BUSYBOX_IMAGEID=$(ocic image status --id=busybox | head -1 | sed -e "s/ID: //g")
BUSYBOX_IMAGEID=$(crioctl image status --id=busybox | head -1 | sed -e "s/ID: //g")
}
function cleanup_ctrs() {
run ocic ctr list --quiet
run crioctl ctr list --quiet
if [ "$status" -eq 0 ]; then
if [ "$output" != "" ]; then
printf '%s\n' "$output" | while IFS= read -r line
do
ocic ctr stop --id "$line" || true
ocic ctr remove --id "$line"
crioctl ctr stop --id "$line" || true
crioctl ctr remove --id "$line"
done
fi
fi
}
function cleanup_images() {
run ocic image list --quiet
run crioctl image list --quiet
if [ "$status" -eq 0 ]; then
if [ "$output" != "" ]; then
printf '%s\n' "$output" | while IFS= read -r line
do
ocic image remove --id "$line"
crioctl image remove --id "$line"
done
fi
fi
}
function cleanup_pods() {
run ocic pod list --quiet
run crioctl pod list --quiet
if [ "$status" -eq 0 ]; then
if [ "$output" != "" ]; then
printf '%s\n' "$output" | while IFS= read -r line
do
ocic pod stop --id "$line" || true
ocic pod remove --id "$line"
crioctl pod stop --id "$line" || true
crioctl pod remove --id "$line"
done
fi
fi
}
# Stop ocid.
function stop_ocid() {
# Stop crio.
function stop_crio() {
if [ "$OCID_PID" != "" ]; then
kill "$OCID_PID" >/dev/null 2>&1
wait "$OCID_PID"
@ -220,13 +220,13 @@ function stop_ocid() {
cleanup_network_conf
}
function restart_ocid() {
function restart_crio() {
if [ "$OCID_PID" != "" ]; then
kill "$OCID_PID" >/dev/null 2>&1
wait "$OCID_PID"
start_ocid
start_crio
else
echo "you must start ocid first"
echo "you must start crio first"
exit 1
fi
}
@ -265,10 +265,10 @@ function is_apparmor_enabled() {
function prepare_network_conf() {
mkdir -p $OCID_CNI_CONFIG
cat >$OCID_CNI_CONFIG/10-ocid.conf <<-EOF
cat >$OCID_CNI_CONFIG/10-crio.conf <<-EOF
{
"cniVersion": "0.2.0",
"name": "ocidnet",
"name": "crionet",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
@ -298,7 +298,7 @@ function prepare_plugin_test_args_network_conf() {
cat >$OCID_CNI_CONFIG/10-plugin-test-args.conf <<-EOF
{
"cniVersion": "0.2.0",
"name": "ocidnet",
"name": "crionet",
"type": "plugin_test_args.bash"
}
EOF
@ -307,7 +307,7 @@ EOF
}
function check_pod_cidr() {
fullnetns=`ocic pod status --id $1 | grep namespace | cut -d ' ' -f 3`
fullnetns=`crioctl pod status --id $1 | grep namespace | cut -d ' ' -f 3`
netns=`basename $fullnetns`
run ip netns exec $netns ip addr show dev eth0 scope global 2>&1
@ -328,7 +328,7 @@ function parse_pod_ip() {
}
function ping_pod() {
netns=`ocic pod status --id $1 | grep namespace | cut -d ' ' -f 3`
netns=`crioctl pod status --id $1 | grep namespace | cut -d ' ' -f 3`
inet=`ip netns exec \`basename $netns\` ip addr show dev eth0 scope global | grep inet`
IFS=" "
@ -340,8 +340,8 @@ function ping_pod() {
}
function ping_pod_from_pod() {
pod_ip=`ocic pod status --id $1 | grep "IP Address" | cut -d ' ' -f 3`
netns=`ocic pod status --id $2 | grep namespace | cut -d ' ' -f 3`
pod_ip=`crioctl pod status --id $1 | grep "IP Address" | cut -d ' ' -f 3`
netns=`crioctl pod status --id $2 | grep namespace | cut -d ' ' -f 3`
ip netns exec `basename $netns` ping -W 1 -c 2 $pod_ip

View file

@ -9,73 +9,73 @@ function teardown() {
}
@test "run container in pod with image ID" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
sed -e "s/%VALUE%/$REDIS_IMAGEID/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageid.json
run ocic ctr create --config "$TESTDIR"/ctr_by_imageid.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDIR"/ctr_by_imageid.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "image pull" {
start_ocid "" "" --no-pause-image
run ocic image pull "$IMAGE"
start_crio "" "" --no-pause-image
run crioctl image pull "$IMAGE"
echo "$output"
[ "$status" -eq 0 ]
cleanup_images
stop_ocid
stop_crio
}
@test "image pull and list by digest" {
start_ocid "" "" --no-pause-image
run ocic image pull nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
start_crio "" "" --no-pause-image
run crioctl image pull nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
echo "$output"
[ "$status" -eq 0 ]
run ocic image list --quiet nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
run crioctl image list --quiet nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
[ "$status" -eq 0 ]
echo "$output"
[ "$output" != "" ]
run ocic image list --quiet nginx@4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
run crioctl image list --quiet nginx@4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
[ "$status" -eq 0 ]
echo "$output"
[ "$output" != "" ]
run ocic image list --quiet @4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
run crioctl image list --quiet @4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
[ "$status" -eq 0 ]
echo "$output"
[ "$output" != "" ]
run ocic image list --quiet 4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
run crioctl image list --quiet 4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
[ "$status" -eq 0 ]
echo "$output"
[ "$output" != "" ]
cleanup_images
stop_ocid
stop_crio
}
@test "image list with filter" {
start_ocid "" "" --no-pause-image
run ocic image pull "$IMAGE"
start_crio "" "" --no-pause-image
run crioctl image pull "$IMAGE"
echo "$output"
[ "$status" -eq 0 ]
run ocic image list --quiet "$IMAGE"
run crioctl image list --quiet "$IMAGE"
echo "$output"
[ "$status" -eq 0 ]
printf '%s\n' "$output" | while IFS= read -r id; do
run ocic image remove --id "$id"
run crioctl image remove --id "$id"
echo "$output"
[ "$status" -eq 0 ]
done
run ocic image list --quiet
run crioctl image list --quiet
echo "$output"
[ "$status" -eq 0 ]
printf '%s\n' "$output" | while IFS= read -r id; do
@ -83,24 +83,24 @@ function teardown() {
status=1
done
cleanup_images
stop_ocid
stop_crio
}
@test "image list/remove" {
start_ocid "" "" --no-pause-image
run ocic image pull "$IMAGE"
start_crio "" "" --no-pause-image
run crioctl image pull "$IMAGE"
echo "$output"
[ "$status" -eq 0 ]
run ocic image list --quiet
run crioctl image list --quiet
echo "$output"
[ "$status" -eq 0 ]
[ "$output" != "" ]
printf '%s\n' "$output" | while IFS= read -r id; do
run ocic image remove --id "$id"
run crioctl image remove --id "$id"
echo "$output"
[ "$status" -eq 0 ]
done
run ocic image list --quiet
run crioctl image list --quiet
echo "$output"
[ "$status" -eq 0 ]
[ "$output" = "" ]
@ -109,28 +109,28 @@ function teardown() {
status=1
done
cleanup_images
stop_ocid
stop_crio
}
@test "image status/remove" {
start_ocid "" "" --no-pause-image
run ocic image pull "$IMAGE"
start_crio "" "" --no-pause-image
run crioctl image pull "$IMAGE"
echo "$output"
[ "$status" -eq 0 ]
run ocic image list --quiet
run crioctl image list --quiet
echo "$output"
[ "$status" -eq 0 ]
[ "$output" != "" ]
printf '%s\n' "$output" | while IFS= read -r id; do
run ocic image status --id "$id"
run crioctl image status --id "$id"
echo "$output"
[ "$status" -eq 0 ]
[ "$output" != "" ]
run ocic image remove --id "$id"
run crioctl image remove --id "$id"
echo "$output"
[ "$status" -eq 0 ]
done
run ocic image list --quiet
run crioctl image list --quiet
echo "$output"
[ "$status" -eq 0 ]
[ "$output" = "" ]
@ -139,5 +139,5 @@ function teardown() {
status=1
done
cleanup_images
stop_ocid
stop_crio
}

View file

@ -3,8 +3,8 @@
load helpers
@test "Check for valid pod netns CIDR" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
@ -12,12 +12,12 @@ load helpers
check_pod_cidr $pod_id
cleanup_pods
stop_ocid
stop_crio
}
@test "Ping pod from the host" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
@ -25,19 +25,19 @@ load helpers
ping_pod $pod_id
cleanup_pods
stop_ocid
stop_crio
}
@test "Ping pod from another pod" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod1_id="$output"
temp_sandbox_conf cni_test
run ocic pod run --config "$TESTDIR"/sandbox_config_cni_test.json
run crioctl pod run --config "$TESTDIR"/sandbox_config_cni_test.json
echo "$output"
[ "$status" -eq 0 ]
pod2_id="$output"
@ -49,21 +49,21 @@ load helpers
[ "$status" -eq 0 ]
cleanup_pods
stop_ocid
stop_crio
}
@test "Ensure correct CNI plugin namespace/name/container-id arguments" {
start_ocid "" "" "" "prepare_plugin_test_args_network_conf"
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio "" "" "" "prepare_plugin_test_args_network_conf"
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
[ "$status" -eq 0 ]
. /tmp/plugin_test_args.out
[ "$FOUND_CNI_CONTAINERID" != "redhat.test.ocid" ]
[ "$FOUND_CNI_CONTAINERID" != "redhat.test.crio" ]
[ "$FOUND_CNI_CONTAINERID" != "podsandbox1" ]
[ "$FOUND_K8S_POD_NAMESPACE" = "redhat.test.ocid" ]
[ "$FOUND_K8S_POD_NAMESPACE" = "redhat.test.crio" ]
[ "$FOUND_K8S_POD_NAME" = "podsandbox1" ]
cleanup_pods
stop_ocid
stop_crio
}

View file

@ -8,282 +8,282 @@ function teardown() {
# PR#59
@test "pod release name on remove" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
id="$output"
run ocic pod stop --id "$id"
run crioctl pod stop --id "$id"
echo "$output"
[ "$status" -eq 0 ]
echo "$output"
run ocic pod remove --id "$id"
run crioctl pod remove --id "$id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod run --config "$TESTDATA"/sandbox_config.json
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
id="$output"
run ocic pod stop --id "$id"
run crioctl pod stop --id "$id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod remove --id "$id"
run crioctl pod remove --id "$id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "pod remove" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod remove --id "$pod_id"
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "pod list filtering" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json -name pod1 --label "a=b" --label "c=d" --label "e=f"
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json -name pod1 --label "a=b" --label "c=d" --label "e=f"
echo "$output"
[ "$status" -eq 0 ]
pod1_id="$output"
run ocic pod run --config "$TESTDATA"/sandbox_config.json -name pod2 --label "a=b" --label "c=d"
run crioctl pod run --config "$TESTDATA"/sandbox_config.json -name pod2 --label "a=b" --label "c=d"
echo "$output"
[ "$status" -eq 0 ]
pod2_id="$output"
run ocic pod run --config "$TESTDATA"/sandbox_config.json -name pod3 --label "a=b"
run crioctl pod run --config "$TESTDATA"/sandbox_config.json -name pod3 --label "a=b"
echo "$output"
[ "$status" -eq 0 ]
pod3_id="$output"
run ocic pod list --label "a=b" --label "c=d" --label "e=f" --quiet
run crioctl pod list --label "a=b" --label "c=d" --label "e=f" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$pod1_id" ]]
run ocic pod list --label "g=h" --quiet
run crioctl pod list --label "g=h" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" == "" ]]
run ocic pod list --label "a=b" --label "c=d" --quiet
run crioctl pod list --label "a=b" --label "c=d" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$pod1_id" ]]
[[ "$output" =~ "$pod2_id" ]]
run ocic pod list --label "a=b" --quiet
run crioctl pod list --label "a=b" --quiet
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$pod1_id" ]]
[[ "$output" =~ "$pod2_id" ]]
[[ "$output" =~ "$pod3_id" ]]
run ocic pod list --id "$pod1_id"
run crioctl pod list --id "$pod1_id"
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$pod1_id" ]]
	# filtering by a truncated id should work as well
run ocic pod list --id "${pod1_id:0:4}"
run crioctl pod list --id "${pod1_id:0:4}"
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$pod1_id" ]]
run ocic pod list --id "$pod2_id"
run crioctl pod list --id "$pod2_id"
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$pod2_id" ]]
run ocic pod list --id "$pod3_id"
run crioctl pod list --id "$pod3_id"
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$pod3_id" ]]
run ocic pod list --id "$pod1_id" --label "a=b"
run crioctl pod list --id "$pod1_id" --label "a=b"
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$pod1_id" ]]
run ocic pod list --id "$pod2_id" --label "a=b"
run crioctl pod list --id "$pod2_id" --label "a=b"
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$pod2_id" ]]
run ocic pod list --id "$pod3_id" --label "a=b"
run crioctl pod list --id "$pod3_id" --label "a=b"
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" != "" ]]
[[ "$output" =~ "$pod3_id" ]]
run ocic pod list --id "$pod3_id" --label "c=d"
run crioctl pod list --id "$pod3_id" --label "c=d"
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" == "" ]]
run ocic pod remove --id "$pod1_id"
run crioctl pod remove --id "$pod1_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod remove --id "$pod2_id"
run crioctl pod remove --id "$pod2_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod remove --id "$pod3_id"
run crioctl pod remove --id "$pod3_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_pods
stop_ocid
stop_crio
}
@test "pod metadata in list & status" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic pod list --id "$pod_id"
run crioctl pod list --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
	# TODO: expected value should not be hard-coded here
[[ "$output" =~ "Name: podsandbox1" ]]
[[ "$output" =~ "UID: redhat-test-ocid" ]]
[[ "$output" =~ "Namespace: redhat.test.ocid" ]]
[[ "$output" =~ "UID: redhat-test-crio" ]]
[[ "$output" =~ "Namespace: redhat.test.crio" ]]
[[ "$output" =~ "Attempt: 1" ]]
run ocic pod status --id "$pod_id"
run crioctl pod status --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
	# TODO: expected value should not be hard-coded here
[[ "$output" =~ "Name: podsandbox1" ]]
[[ "$output" =~ "UID: redhat-test-ocid" ]]
[[ "$output" =~ "Namespace: redhat.test.ocid" ]]
[[ "$output" =~ "UID: redhat-test-crio" ]]
[[ "$output" =~ "Namespace: redhat.test.crio" ]]
[[ "$output" =~ "Attempt: 1" ]]
cleanup_pods
stop_ocid
stop_crio
}
@test "pass pod sysctls to runtime" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --pod "$pod_id" --config "$TESTDATA"/container_redis.json
run crioctl ctr create --pod "$pod_id" --config "$TESTDATA"/container_redis.json
echo "$output"
[ "$status" -eq 0 ]
container_id="$output"
run ocic ctr start --id "$container_id"
run crioctl ctr start --id "$container_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$container_id" sysctl kernel.shm_rmid_forced
run crioctl ctr execsync --id "$container_id" sysctl kernel.shm_rmid_forced
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "kernel.shm_rmid_forced = 1" ]]
run ocic ctr execsync --id "$container_id" sysctl kernel.msgmax
run crioctl ctr execsync --id "$container_id" sysctl kernel.msgmax
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "kernel.msgmax = 8192" ]]
run ocic ctr execsync --id "$container_id" sysctl net.ipv4.ip_local_port_range
run crioctl ctr execsync --id "$container_id" sysctl net.ipv4.ip_local_port_range
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "net.ipv4.ip_local_port_range = 1024 65000" ]]
cleanup_pods
stop_ocid
stop_crio
}
@test "pod stop idempotent" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic pod stop --id "$pod_id"
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod stop --id "$pod_id"
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "pod remove idempotent" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic pod remove --id "$pod_id"
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod remove --id "$pod_id"
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "pod stop idempotent with ctrs already stopped" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic pod stop --id "$pod_id"
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
@test "restart ocid and still get pod status" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
@test "restart crio and still get pod status" {
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic pod stop --id "$pod_id"
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
restart_ocid
run ocic pod status --id "$pod_id"
restart_crio
run crioctl pod status --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
[ "$output" != "" ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}

View file

@ -6,74 +6,74 @@ function teardown() {
cleanup_test
}
@test "ocid restore" {
start_ocid
run ocic pod run --config "$TESTDATA"/sandbox_config.json
@test "crio restore" {
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic pod list --id "$pod_id"
run crioctl pod list --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
pod_list_info="$output"
run ocic pod status --id "$pod_id"
run crioctl pod status --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
pod_status_info="$output"
run ocic ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr list --id "$ctr_id"
run crioctl ctr list --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_list_info="$output"
run ocic ctr status --id "$ctr_id"
run crioctl ctr status --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_status_info="$output"
stop_ocid
stop_crio
start_ocid
run ocic pod list
start_crio
run crioctl pod list
echo "$output"
[ "$status" -eq 0 ]
[[ "${output}" != "" ]]
[[ "${output}" =~ "${pod_id}" ]]
run ocic pod list --id "$pod_id"
run crioctl pod list --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
[[ "${output}" == "${pod_list_info}" ]]
run ocic pod status --id "$pod_id"
run crioctl pod status --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
[[ "${output}" == "${pod_status_info}" ]]
run ocic ctr list
run crioctl ctr list
echo "$output"
[ "$status" -eq 0 ]
[[ "${output}" != "" ]]
[[ "${output}" =~ "${pod_id}" ]]
run ocic ctr list --id "$ctr_id"
run crioctl ctr list --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
[[ "${output}" == "${ctr_list_info}" ]]
run ocic ctr status --id "$ctr_id"
run crioctl ctr status --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
[[ "${output}" == "${ctr_status_info}" ]]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}

View file

@ -6,10 +6,10 @@ function teardown() {
cleanup_test
}
@test "ocic runtimeversion" {
start_ocid
run ocic runtimeversion
@test "crioctl runtimeversion" {
start_crio
run crioctl runtimeversion
echo "$output"
[ "$status" -eq 0 ]
stop_ocid
stop_crio
}

View file

@ -19,27 +19,27 @@ function teardown() {
sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
start_ocid "$TESTDIR"/seccomp_profile1.json
start_crio "$TESTDIR"/seccomp_profile1.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.ocid-seccomp1-1-testname-0": "unconfined"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp1.json
run ocic pod run --name seccomp1 --config "$TESTDIR"/seccomp1.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.crio-seccomp1-1-testname-0": "unconfined"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp1.json
run crioctl pod run --name seccomp1 --config "$TESTDIR"/seccomp1.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --name testname --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" chmod 777 .
run crioctl ctr execsync --id "$ctr_id" chmod 777 .
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
# 2. test running with ctr runtime/default
@ -55,21 +55,21 @@ function teardown() {
sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
start_ocid "$TESTDIR"/seccomp_profile1.json
start_crio "$TESTDIR"/seccomp_profile1.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.ocid-seccomp2-1-testname2-0": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp2.json
run ocic pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.crio-seccomp2-1-testname2-0": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp2.json
run crioctl pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --name testname2 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" chmod 777 .
run crioctl ctr execsync --id "$ctr_id" chmod 777 .
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "Exit code: 1" ]]
@ -77,7 +77,7 @@ function teardown() {
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
# 3. test running with ctr wrong profile name
@ -92,14 +92,14 @@ function teardown() {
sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
start_ocid "$TESTDIR"/seccomp_profile1.json
start_crio "$TESTDIR"/seccomp_profile1.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.ocid-seccomp3-1-testname3-1": "notgood"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp3.json
run ocic pod run --name seccomp3 --config "$TESTDIR"/seccomp3.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.crio-seccomp3-1-testname3-1": "notgood"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp3.json
run crioctl pod run --name seccomp3 --config "$TESTDIR"/seccomp3.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname3 --config "$TESTDATA"/container_config.json --pod "$pod_id"
run crioctl ctr create --name testname3 --config "$TESTDATA"/container_config.json --pod "$pod_id"
echo "$output"
[ "$status" -ne 0 ]
[[ "$output" =~ "unknown seccomp profile option:" ]]
@ -107,7 +107,7 @@ function teardown() {
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
# TODO(runcom): need https://issues.k8s.io/36997
@ -123,7 +123,7 @@ function teardown() {
#sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
#sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
#start_ocid "$TESTDIR"/seccomp_profile1.json
#start_crio "$TESTDIR"/seccomp_profile1.json
skip "need https://issues.k8s.io/36997"
}
@ -143,21 +143,21 @@ function teardown() {
sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
start_ocid "$TESTDIR"/seccomp_profile1.json
start_crio "$TESTDIR"/seccomp_profile1.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.ocid-seccomp2-1-testname2-0-not-exists": "unconfined", "security\.alpha\.kubernetes\.io\/seccomp\/pod": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp5.json
run ocic pod run --name seccomp5 --config "$TESTDIR"/seccomp5.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.crio-seccomp2-1-testname2-0-not-exists": "unconfined", "security\.alpha\.kubernetes\.io\/seccomp\/pod": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp5.json
run crioctl pod run --name seccomp5 --config "$TESTDIR"/seccomp5.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" chmod 777 .
run crioctl ctr execsync --id "$ctr_id" chmod 777 .
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "Exit code: 1" ]]
@ -165,7 +165,7 @@ function teardown() {
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
# 6. test running with unknown ctr profile and no pod, falls back to unconfined
@ -183,27 +183,27 @@ function teardown() {
sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
start_ocid "$TESTDIR"/seccomp_profile1.json
start_crio "$TESTDIR"/seccomp_profile1.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.ocid-seccomp6-1-testname6-0-not-exists": "runtime-default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp6.json
run ocic pod run --name seccomp6 --config "$TESTDIR"/seccomp6.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/container\/redhat\.test\.crio-seccomp6-1-testname6-0-not-exists": "runtime-default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp6.json
run crioctl pod run --name seccomp6 --config "$TESTDIR"/seccomp6.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --name testname6 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --name testname6 --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" chmod 777 .
run crioctl ctr execsync --id "$ctr_id" chmod 777 .
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
# 1. test running with pod unconfined
@ -219,27 +219,27 @@ function teardown() {
sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
start_ocid "$TESTDIR"/seccomp_profile1.json
start_crio "$TESTDIR"/seccomp_profile1.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/pod": "unconfined"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp1.json
run ocic pod run --name seccomp1 --config "$TESTDIR"/seccomp1.json
run crioctl pod run --name seccomp1 --config "$TESTDIR"/seccomp1.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" chmod 777 .
run crioctl ctr execsync --id "$ctr_id" chmod 777 .
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
# 2. test running with pod runtime/default
@ -255,21 +255,21 @@ function teardown() {
sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
start_ocid "$TESTDIR"/seccomp_profile1.json
start_crio "$TESTDIR"/seccomp_profile1.json
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/pod": "runtime\/default"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp2.json
run ocic pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json
run crioctl pod run --name seccomp2 --config "$TESTDIR"/seccomp2.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run ocic ctr start --id "$ctr_id"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ocic ctr execsync --id "$ctr_id" chmod 777 .
run crioctl ctr execsync --id "$ctr_id" chmod 777 .
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "Exit code: 1" ]]
@ -277,7 +277,7 @@ function teardown() {
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
# 3. test running with pod wrong profile name
@ -292,15 +292,15 @@ function teardown() {
sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
start_ocid "$TESTDIR"/seccomp_profile1.json
start_crio "$TESTDIR"/seccomp_profile1.json
# 3. test running with pod wrong profile name
sed -e 's/%VALUE%/,"security\.alpha\.kubernetes\.io\/seccomp\/pod": "notgood"/g' "$TESTDATA"/sandbox_config_seccomp.json > "$TESTDIR"/seccomp3.json
run ocic pod run --name seccomp3 --config "$TESTDIR"/seccomp3.json
run crioctl pod run --name seccomp3 --config "$TESTDIR"/seccomp3.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run ocic ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
echo "$output"
[ "$status" -ne 0 ]
[[ "$output" =~ "unknown seccomp profile option:" ]]
@ -308,7 +308,7 @@ function teardown() {
cleanup_ctrs
cleanup_pods
stop_ocid
stop_crio
}
# TODO(runcom): need https://issues.k8s.io/36997
@ -324,7 +324,7 @@ function teardown() {
#sed -i 's/"fchmod",//' "$TESTDIR"/seccomp_profile1.json
#sed -i 's/"fchmodat",//g' "$TESTDIR"/seccomp_profile1.json
#start_ocid "$TESTDIR"/seccomp_profile1.json
#start_crio "$TESTDIR"/seccomp_profile1.json
skip "need https://issues.k8s.io/36997"
}

View file

@ -1,15 +1,15 @@
In terminal 1:
```
sudo ./ocid
sudo ./crio
```
In terminal 2:
```
sudo ./ocic runtimeversion
sudo ./crioctl runtimeversion
sudo rm -rf /var/lib/containers/storage/sandboxes/podsandbox1
sudo ./ocic pod run --config testdata/sandbox_config.json
sudo ./crioctl pod run --config testdata/sandbox_config.json
sudo rm -rf /var/lib/containers/storage/containers/container1
sudo ./ocic container create --pod podsandbox1 --config testdata/container_config.json
sudo ./crioctl container create --pod podsandbox1 --config testdata/container_config.json
```
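Both `pod run` and `container create` print the ID of the object they created. As a quick sketch (substitute the container ID printed by the create command above for the placeholder), the new container can then be started and inspected:
```
# use the container ID printed by `crioctl container create`
sudo ./crioctl ctr start --id <container-id>
sudo ./crioctl ctr status --id <container-id>
```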

View file

@ -35,7 +35,7 @@
},
"annotations": {
"owner": "dragon",
"daemon": "ocid"
"daemon": "crio"
},
"privileged": true,
"readonly_rootfs": true,

View file

@ -35,7 +35,7 @@
},
"annotations": {
"owner": "dragon",
"daemon": "ocid"
"daemon": "crio"
},
"privileged": true,
"readonly_rootfs": true,

View file

@ -37,7 +37,7 @@
},
"annotations": {
"owner": "dragon",
"daemon": "ocid"
"daemon": "crio"
},
"privileged": true,
"readonly_rootfs": true,

View file

@ -37,7 +37,7 @@
},
"annotations": {
"owner": "dragon",
"daemon": "ocid"
"daemon": "crio"
},
"privileged": true,
"readonly_rootfs": true,

View file

@ -1 +1 @@
profile ocid-default flags=(attach_disconnected) {}
profile crio-default flags=(attach_disconnected) {}

View file

@ -1,11 +1,11 @@
{
"metadata": {
"name": "podsandbox1",
"uid": "redhat-test-ocid",
"namespace": "redhat.test.ocid",
"uid": "redhat-test-crio",
"namespace": "redhat.test.crio",
"attempt": 1
},
"hostname": "ocic_host",
"hostname": "crioctl_host",
"log_directory": "",
"dns_options": {
"servers": [
@ -52,7 +52,7 @@
"security.alpha.kubernetes.io/seccomp/pod": "unconfined"
},
"linux": {
"cgroup_parent": "/ocid-podsandbox1",
"cgroup_parent": "/crio-podsandbox1",
"security_context": {
"namespace_options": {
"host_network": false,

View file

@ -1,11 +1,11 @@
{
"metadata": {
"name": "podsandbox1",
"uid": "redhat-test-ocid",
"namespace": "redhat.test.ocid",
"uid": "redhat-test-crio",
"namespace": "redhat.test.crio",
"attempt": 1
},
"hostname": "ocic_host",
"hostname": "crioctl_host",
"log_directory": "",
"dns_options": {
"servers": [
@ -51,7 +51,7 @@
"security.alpha.kubernetes.io/seccomp/pod": "unconfined"
},
"linux": {
"cgroup_parent": "/ocid-podsandbox1",
"cgroup_parent": "/crio-podsandbox1",
"security_context": {
"namespace_options": {
"host_network": true,

View file

@ -1,11 +1,11 @@
{
"metadata": {
"name": "podsandbox1",
"uid": "redhat-test-ocid",
"namespace": "redhat.test.ocid",
"uid": "redhat-test-crio",
"namespace": "redhat.test.crio",
"attempt": 1
},
"hostname": "ocic_host",
"hostname": "crioctl_host",
"log_directory": "",
"dns_options": {
"servers": [

View file

@ -25,8 +25,8 @@ gcloud compute ssh cri-o
This section will walk you through installing the following components:
* ocid - The implementation of the Kubernetes CRI, which manages Pods.
* ocic - The ocid client for testing.
* crio - The implementation of the Kubernetes CRI, which manages Pods.
* crioctl - The crio client for testing.
* cni - The Container Network Interface
* runc - The OCI runtime to launch the container
@ -60,9 +60,9 @@ commit: c91b5bea4830a57eac7882d7455d59518cdf70ec
spec: 1.0.0-rc2-dev
```
### ocid
### crio
The `ocid` project does not ship binary releases so you'll need to build it from source.
The `crio` project does not ship binary releases so you'll need to build it from source.
#### Install the Go runtime and tool chain
@ -100,7 +100,7 @@ go version
go version go1.7.4 linux/amd64
```
#### Build ocid from source
#### Build crio from source
```
sudo apt-get install -y libglib2.0-dev libseccomp-dev libapparmor-dev
@ -130,16 +130,16 @@ Output:
```
install -D -m 755 kpod /usr/local/bin/kpod
install -D -m 755 ocid /usr/local/bin/ocid
install -D -m 755 ocic /usr/local/bin/ocic
install -D -m 755 conmon/conmon /usr/local/libexec/ocid/conmon
install -D -m 755 pause/pause /usr/local/libexec/ocid/pause
install -D -m 755 crio /usr/local/bin/crio
install -D -m 755 crioctl /usr/local/bin/crioctl
install -D -m 755 conmon/conmon /usr/local/libexec/crio/conmon
install -D -m 755 pause/pause /usr/local/libexec/crio/pause
install -d -m 755 /usr/local/share/man/man{1,5,8}
install -m 644 docs/kpod.1 docs/kpod-launch.1 -t /usr/local/share/man/man1
install -m 644 docs/ocid.conf.5 -t /usr/local/share/man/man5
install -m 644 docs/ocid.8 -t /usr/local/share/man/man8
install -D -m 644 ocid.conf /etc/ocid/ocid.conf
install -D -m 644 seccomp.json /etc/ocid/seccomp.json
install -m 644 docs/crio.conf.5 -t /usr/local/share/man/man5
install -m 644 docs/crio.8 -t /usr/local/share/man/man8
install -D -m 644 crio.conf /etc/crio/crio.conf
install -D -m 644 seccomp.json /etc/crio/seccomp.json
```
If you are installing for the first time, generate config as follows:
@ -151,11 +151,11 @@ make install.config
Output:
```
install -D -m 644 ocid.conf /etc/ocid/ocid.conf
install -D -m 644 seccomp.json /etc/ocid/seccomp.json
install -D -m 644 crio.conf /etc/crio/crio.conf
install -D -m 644 seccomp.json /etc/crio/seccomp.json
```
#### Start the ocid system daemon
#### Start the crio system daemon
```
sudo sh -c 'echo "[Unit]
@ -163,28 +163,28 @@ Description=OCI-based implementation of Kubernetes Container Runtime Interface
Documentation=https://github.com/kubernetes-incubator/cri-o
[Service]
ExecStart=/usr/local/bin/ocid --debug
ExecStart=/usr/local/bin/crio --debug
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target" > /etc/systemd/system/ocid.service'
WantedBy=multi-user.target" > /etc/systemd/system/crio.service'
```
```
sudo systemctl daemon-reload
```
```
sudo systemctl enable ocid
sudo systemctl enable crio
```
```
sudo systemctl start ocid
sudo systemctl start crio
```
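If the daemon does not come up, plain systemd tooling (nothing crio-specific) is the first thing to check:
```
# standard systemctl status check
sudo systemctl status crio --no-pager
```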
#### Ensure the ocid service is running
#### Ensure the crio service is running
```
sudo ocic runtimeversion
sudo crioctl runtimeversion
```
```
VersionResponse: Version: 0.1.0, RuntimeName: runc, RuntimeVersion: 1.0.0-rc2, RuntimeApiVersion: v1alpha1
@ -290,15 +290,15 @@ cd $GOPATH/src/github.com/kubernetes-incubator/cri-o
Next create the Pod and capture the Pod ID for later use:
```
POD_ID=$(sudo ocic pod run --config test/testdata/sandbox_config.json)
POD_ID=$(sudo crioctl pod run --config test/testdata/sandbox_config.json)
```
> sudo ocic pod run --config test/testdata/sandbox_config.json
> sudo crioctl pod run --config test/testdata/sandbox_config.json
Use the `ocic` command to get the status of the Pod:
Use the `crioctl` command to get the status of the Pod:
```
sudo ocic pod status --id $POD_ID
sudo crioctl pod status --id $POD_ID
```
Output:
@ -306,8 +306,8 @@ Output:
```
ID: cd6c0883663c6f4f99697aaa15af8219e351e03696bd866bc3ac055ef289702a
Name: podsandbox1
UID: redhat-test-ocid
Namespace: redhat.test.ocid
UID: redhat-test-crio
Namespace: redhat.test.crio
Attempt: 1
Status: SANDBOX_READY
Created: 2016-12-14 15:59:04.373680832 +0000 UTC
@ -324,26 +324,26 @@ Annotations:
### Create a Redis container inside the Pod
Use the `ocic` command to create a redis container from a container configuration and attach it to the Pod created earlier:
Use the `crioctl` command to create a redis container from a container configuration and attach it to the Pod created earlier:
```
CONTAINER_ID=$(sudo ocic ctr create --pod $POD_ID --config test/testdata/container_redis.json)
CONTAINER_ID=$(sudo crioctl ctr create --pod $POD_ID --config test/testdata/container_redis.json)
```
> sudo ocic ctr create --pod $POD_ID --config test/testdata/container_redis.json
> sudo crioctl ctr create --pod $POD_ID --config test/testdata/container_redis.json
The `ocic ctr create` command will take a few seconds to return because the redis container needs to be pulled.
The `crioctl ctr create` command will take a few seconds to return because the redis image needs to be pulled.
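To confirm the image is available locally before starting the container, the image subcommands used by the integration tests give a quick check (a sketch, assuming the redis:alpine image referenced by test/testdata/container_redis.json):
```
# show the pulled image IDs, then the status of the redis image
sudo crioctl image list --quiet
sudo crioctl image status --id=redis:alpine
```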
Start the Redis container:
```
sudo ocic ctr start --id $CONTAINER_ID
sudo crioctl ctr start --id $CONTAINER_ID
```
Get the status for the Redis container:
```
sudo ocic ctr status --id $CONTAINER_ID
sudo crioctl ctr status --id $CONTAINER_ID
```
Output:
@ -391,34 +391,34 @@ Connection closed.
#### Viewing the Redis logs
The Redis logs are logged to the stderr of the ocid service, which can be viewed using `journalctl`:
The Redis logs are logged to the stderr of the crio service, which can be viewed using `journalctl`:
```
sudo journalctl -u ocid --no-pager
sudo journalctl -u crio --no-pager
```
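To follow the log stream while exercising the container, journalctl's standard follow flag can be used (not crio-specific):
```
sudo journalctl -u crio -f
```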
### Stop the redis container and delete the Pod
```
sudo ocic ctr stop --id $CONTAINER_ID
sudo crioctl ctr stop --id $CONTAINER_ID
```
```
sudo ocic ctr remove --id $CONTAINER_ID
sudo crioctl ctr remove --id $CONTAINER_ID
```
```
sudo ocic pod stop --id $POD_ID
sudo crioctl pod stop --id $POD_ID
```
```
sudo ocic pod remove --id $POD_ID
sudo crioctl pod remove --id $POD_ID
```
```
sudo ocic pod list
sudo crioctl pod list
```
```
sudo ocic ctr list
sudo crioctl ctr list
```
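Both of the final list commands should now return no entries, confirming that the container and the Pod were removed.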