fix chown and seccomp

Signed-off-by: Jess Frazelle <acidburn@microsoft.com>
Jess Frazelle 2018-03-22 09:22:44 -04:00
parent 60f032f6f5
commit 2569457739
8197 changed files with 30742 additions and 1596554 deletions

View file

@@ -20,6 +20,7 @@ RUN apt-get update && apt-get install -y \
protobuf-compiler \
python-minimal \
uidmap \
+kmod \
--no-install-recommends \
&& apt-get clean

View file

@@ -65,13 +65,13 @@ localtest:
make localunittest localintegration localrootlessintegration
unittest: runcimage
-docker run -e TESTFLAGS -t --privileged --rm -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_IMAGE) make localunittest
+docker run -e TESTFLAGS -t --privileged --rm -v /lib/modules:/lib/modules:ro -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_IMAGE) make localunittest
localunittest: all
$(GO) test -timeout 3m -tags "$(BUILDTAGS)" ${TESTFLAGS} -v $(allpackages)
integration: runcimage
-docker run -e TESTFLAGS -t --privileged --rm -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_IMAGE) make localintegration
+docker run -e TESTFLAGS -t --privileged --rm -v /lib/modules:/lib/modules:ro -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_IMAGE) make localintegration
localintegration: all
bats -t tests/integration${TESTFLAGS}

View file

@@ -1,238 +0,0 @@
/*
* Copyright 2016 SUSE LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strings"
"github.com/containerd/console"
"github.com/opencontainers/runc/libcontainer/utils"
"github.com/urfave/cli"
)
// version will be populated by the Makefile, read from
// VERSION file of the source code.
var version = ""
// gitCommit will be the hash that the binary was built from
// and will be populated by the Makefile
var gitCommit = ""
const (
usage = `Open Container Initiative contrib/cmd/recvtty
recvtty is a reference implementation of a consumer of runC's --console-socket
API. It has two main modes of operation:
* single: Only permit one terminal to be sent to the socket, which is
then hooked up to the stdio of the recvtty process. This is useful
for rudimentary shell management of a container.
* null: Permit any number of terminals to be sent to the socket, but they
are all read to /dev/null. This is used for testing, and imitates the
old runC API's --console=/dev/pts/ptmx hack which would allow for a
similar trick. This is probably not what you want to use, unless
you're doing something like our bats integration tests.
To use recvtty, just specify a socket path at which you want to receive
terminals:
$ recvtty [--mode <single|null>] socket.sock
`
)
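// An illustrative pairing with runc (socket path and container id are
// hypothetical); run recvtty in one terminal and runc in another:
//
//	$ recvtty --mode single /tmp/console.sock
//	$ runc run --detach --console-socket /tmp/console.sock mycontainer
//
// The container's console is then hooked up to recvtty's stdio.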
func bail(err error) {
fmt.Fprintf(os.Stderr, "[recvtty] fatal error: %v\n", err)
os.Exit(1)
}
func handleSingle(path string) error {
// Open a socket.
ln, err := net.Listen("unix", path)
if err != nil {
return err
}
defer ln.Close()
// We only accept a single connection, since we can only really have
// one reader for os.Stdin. Plus this is all a PoC.
conn, err := ln.Accept()
if err != nil {
return err
}
defer conn.Close()
// Close ln, to allow for other instances to take over.
ln.Close()
// Get the fd of the connection.
unixconn, ok := conn.(*net.UnixConn)
if !ok {
return fmt.Errorf("failed to cast to unixconn")
}
socket, err := unixconn.File()
if err != nil {
return err
}
defer socket.Close()
// Get the master file descriptor from runC.
master, err := utils.RecvFd(socket)
if err != nil {
return err
}
c, err := console.ConsoleFromFile(master)
if err != nil {
return err
}
console.ClearONLCR(c.Fd())
// Copy from our stdio to the master fd.
quitChan := make(chan struct{})
go func() {
io.Copy(os.Stdout, c)
quitChan <- struct{}{}
}()
go func() {
io.Copy(c, os.Stdin)
quitChan <- struct{}{}
}()
// Only close the master fd once we've stopped copying.
<-quitChan
c.Close()
return nil
}
func handleNull(path string) error {
// Open a socket.
ln, err := net.Listen("unix", path)
if err != nil {
return err
}
defer ln.Close()
// As opposed to handleSingle we accept as many connections as we get, but
// we don't interact with Stdin at all (and we copy stdout to /dev/null).
for {
conn, err := ln.Accept()
if err != nil {
return err
}
go func(conn net.Conn) {
// Don't leave references lying around.
defer conn.Close()
// Get the fd of the connection.
unixconn, ok := conn.(*net.UnixConn)
if !ok {
return
}
socket, err := unixconn.File()
if err != nil {
return
}
defer socket.Close()
// Get the master file descriptor from runC.
master, err := utils.RecvFd(socket)
if err != nil {
return
}
// Just do a dumb copy to /dev/null.
devnull, err := os.OpenFile("/dev/null", os.O_RDWR, 0)
if err != nil {
// TODO: Handle this nicely.
return
}
io.Copy(devnull, master)
devnull.Close()
}(conn)
}
}
func main() {
app := cli.NewApp()
app.Name = "recvtty"
app.Usage = usage
// Set version to be the same as runC.
var v []string
if version != "" {
v = append(v, version)
}
if gitCommit != "" {
v = append(v, fmt.Sprintf("commit: %s", gitCommit))
}
app.Version = strings.Join(v, "\n")
// Set the flags.
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "mode, m",
Value: "single",
Usage: "Mode of operation (single or null)",
},
cli.StringFlag{
Name: "pid-file",
Value: "",
Usage: "Path to write daemon process ID to",
},
}
app.Action = func(ctx *cli.Context) error {
args := ctx.Args()
if len(args) != 1 {
return fmt.Errorf("need to specify a single socket path")
}
path := ctx.Args()[0]
pidPath := ctx.String("pid-file")
if pidPath != "" {
pid := fmt.Sprintf("%d\n", os.Getpid())
if err := ioutil.WriteFile(pidPath, []byte(pid), 0644); err != nil {
return err
}
}
switch ctx.String("mode") {
case "single":
if err := handleSingle(path); err != nil {
return err
}
case "null":
if err := handleNull(path); err != nil {
return err
}
default:
return fmt.Errorf("need to select a valid mode: %s", ctx.String("mode"))
}
return nil
}
if err := app.Run(os.Args); err != nil {
bail(err)
}
}

View file

@@ -1,805 +0,0 @@
#!/bin/bash
#
# bash completion file for runc command
#
# This script provides completion of:
# - commands and their options
# - filepaths
#
# To enable the completions either:
# - place this file in /usr/share/bash-completion/completions
# or
# - copy this file to e.g. ~/.runc-completion.sh and add the line
# below to your .bashrc after bash completion features are loaded
# . ~/.runc-completion.sh
#
# Configuration:
#
# Note for developers:
# Please arrange options sorted alphabetically by long name with the short
# options immediately following their corresponding long form.
# This order should be applied to lists, alternatives and code blocks.
__runc_previous_extglob_setting=$(shopt -p extglob)
shopt -s extglob
__runc_list_all() {
COMPREPLY=($(compgen -W "$(runc list -q)" -- $cur))
}
__runc_pos_first_nonflag() {
local argument_flags=$1
local counter=$((${subcommand_pos:-${command_pos}} + 1))
while [ $counter -le $cword ]; do
if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then
((counter++))
else
case "${words[$counter]}" in
-*) ;;
*)
break
;;
esac
fi
((counter++))
done
echo $counter
}
# Transforms a multiline list of strings into a single line string
# with the words separated by "|".
# This is used to prepare arguments to __runc_pos_first_nonflag().
__runc_to_alternatives() {
local parts=($1)
local IFS='|'
echo "${parts[*]}"
}
# Transforms a multiline list of options into an extglob pattern
# suitable for use in case statements.
__runc_to_extglob() {
local extglob=$(__runc_to_alternatives "$1")
echo "@($extglob)"
}
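# Illustrative behaviour of the two helpers above (values are examples only):
#   __runc_to_alternatives "--log --root"   # prints: --log|--root
#   __runc_to_extglob "--log --root"        # prints: @(--log|--root)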
# Subcommand processing.
# Locates the first occurrence of any of the subcommands contained in the
# first argument. In case of a match, calls the corresponding completion
# function and returns 0.
# If no match is found, 1 is returned. The calling function can then
# continue processing its completion.
#
# TODO if the preceding command has options that accept arguments and an
# argument is equal to one of the subcommands, this is falsely detected as
# a match.
__runc_subcommands() {
local subcommands="$1"
local counter=$(($command_pos + 1))
while [ $counter -lt $cword ]; do
case "${words[$counter]}" in
$(__runc_to_extglob "$subcommands"))
subcommand_pos=$counter
local subcommand=${words[$counter]}
local completions_func=_runc_${command}_${subcommand}
declare -F $completions_func >/dev/null && $completions_func
return 0
;;
esac
((counter++))
done
return 1
}
# List all Signals
__runc_list_signals() {
COMPREPLY=($(compgen -W "$(for i in $(kill -l | xargs); do echo $i; done | grep SIG)"))
}
# suppress trailing whitespace
__runc_nospace() {
# compopt is not available in ancient bash versions
type compopt &>/dev/null && compopt -o nospace
}
# The list of capabilities is defined in types.go, ALL was added manually.
__runc_complete_capabilities() {
COMPREPLY=($(compgen -W "
ALL
AUDIT_CONTROL
AUDIT_WRITE
AUDIT_READ
BLOCK_SUSPEND
CHOWN
DAC_OVERRIDE
DAC_READ_SEARCH
FOWNER
FSETID
IPC_LOCK
IPC_OWNER
KILL
LEASE
LINUX_IMMUTABLE
MAC_ADMIN
MAC_OVERRIDE
MKNOD
NET_ADMIN
NET_BIND_SERVICE
NET_BROADCAST
NET_RAW
SETFCAP
SETGID
SETPCAP
SETUID
SYS_ADMIN
SYS_BOOT
SYS_CHROOT
SYSLOG
SYS_MODULE
SYS_NICE
SYS_PACCT
SYS_PTRACE
SYS_RAWIO
SYS_RESOURCE
SYS_TIME
SYS_TTY_CONFIG
WAKE_ALARM
" -- "$cur"))
}
_runc_exec() {
local boolean_options="
--help
--no-new-privs
--tty, -t
--detach, -d
"
local options_with_args="
--console
--cwd
--env, -e
--user, -u
--process, -p
--pid-file
--process-label
--apparmor
--cap, -c
"
local all_options="$options_with_args $boolean_options"
case "$prev" in
--cap | -c)
__runc_complete_capabilities
return
;;
--console | --cwd | --process | --apparmor)
case "$cur" in
*:*) ;; # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
'')
COMPREPLY=($(compgen -W '/' -- "$cur"))
__runc_nospace
;;
/*)
_filedir
__runc_nospace
;;
esac
return
;;
--env | -e)
COMPREPLY=($(compgen -e -- "$cur"))
__runc_nospace
return
;;
$(__runc_to_extglob "$options_with_args"))
return
;;
esac
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$all_options" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
# global options that may appear after the runc command
_runc_runc() {
local boolean_options="
$global_boolean_options
--help
--version -v
--debug
"
local options_with_args="
--log
--log-format
--root
--criu
"
case "$prev" in
--log | --root | --criu)
case "$cur" in
*:*) ;; # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
'')
COMPREPLY=($(compgen -W '/' -- "$cur"))
__runc_nospace
;;
*)
_filedir
__runc_nospace
;;
esac
return
;;
--log-format)
COMPREPLY=($(compgen -W 'text json' -- "$cur"))
return
;;
$(__runc_to_extglob "$options_with_args"))
return
;;
esac
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
local counter=$(__runc_pos_first_nonflag $(__runc_to_extglob "$options_with_args"))
if [ $cword -eq $counter ]; then
COMPREPLY=($(compgen -W "${commands[*]} help" -- "$cur"))
fi
;;
esac
}
_runc_pause() {
local boolean_options="
--help
-h
"
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_ps() {
local boolean_options="
--help
-h
"
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_delete() {
local boolean_options="
--help
-h
"
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_kill() {
local boolean_options="
--help
-h
--all
-a
"
case "$prev" in
"kill")
__runc_list_all
return
;;
*)
__runc_list_signals
return
;;
esac
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_events() {
local boolean_options="
--help
--stats
"
local options_with_args="
--interval
"
case "$prev" in
$(__runc_to_extglob "$options_with_args"))
return
;;
esac
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_list() {
local boolean_options="
--help
--quiet
-q
"
local options_with_args="
--format
-f
"
case "$prev" in
--format | -f)
COMPREPLY=($(compgen -W 'text json' -- "$cur"))
return
;;
$(__runc_to_extglob "$options_with_args"))
return
;;
esac
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
local counter=$(__runc_pos_first_nonflag $(__runc_to_extglob "$options_with_args"))
;;
esac
}
_runc_spec() {
local boolean_options="
--help
"
local options_with_args="
--bundle
-b
"
case "$prev" in
--bundle | -b)
case "$cur" in
'')
COMPREPLY=($(compgen -W '/' -- "$cur"))
__runc_nospace
;;
/*)
_filedir
__runc_nospace
;;
esac
return
;;
$(__runc_to_extglob "$options_with_args"))
return
;;
esac
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
local counter=$(__runc_pos_first_nonflag $(__runc_to_extglob "$options_with_args"))
;;
esac
}
_runc_run() {
local boolean_options="
--help
--detach
-d
--no-subreaper
--no-pivot
--no-new-keyring
"
local options_with_args="
--bundle
-b
--console
--pid-file
"
case "$prev" in
--bundle | -b | --console | --pid-file)
case "$cur" in
'')
COMPREPLY=($(compgen -W '/' -- "$cur"))
__runc_nospace
;;
/*)
_filedir
__runc_nospace
;;
esac
return
;;
$(__runc_to_extglob "$options_with_args"))
return
;;
esac
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_checkpoint() {
local boolean_options="
--help
-h
--leave-running
--tcp-established
--ext-unix-sk
--shell-job
--file-locks
"
local options_with_args="
--image-path
--work-path
--page-server
--manage-cgroups-mode
"
case "$prev" in
--page-server) ;;
--manage-cgroups-mode)
COMPREPLY=($(compgen -W "soft full strict" -- "$cur"))
return
;;
--image-path | --work-path)
case "$cur" in
*:*) ;; # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
'')
COMPREPLY=($(compgen -W '/' -- "$cur"))
__runc_nospace
;;
*)
_filedir
__runc_nospace
;;
esac
return
;;
$(__runc_to_extglob "$options_with_args"))
return
;;
esac
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_create() {
local boolean_options="
--help
--no-pivot
--no-new-keyring
"
local options_with_args="
--bundle
-b
--console
--pid-file
"
case "$prev" in
--bundle | -b | --console | --pid-file)
case "$cur" in
'')
COMPREPLY=($(compgen -W '/' -- "$cur"))
__runc_nospace
;;
/*)
_filedir
__runc_nospace
;;
esac
return
;;
$(__runc_to_extglob "$options_with_args"))
return
;;
esac
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_help() {
local counter=$(__runc_pos_first_nonflag)
if [ $cword -eq $counter ]; then
COMPREPLY=($(compgen -W "${commands[*]}" -- "$cur"))
fi
}
_runc_restore() {
local boolean_options="
--help
--tcp-established
--ext-unix-sk
--shell-job
--file-locks
--detach
-d
--no-subreaper
--no-pivot
"
local options_with_args="
-b
--bundle
--image-path
--work-path
--manage-cgroups-mode
--pid-file
"
local all_options="$options_with_args $boolean_options"
case "$prev" in
--manage-cgroups-mode)
COMPREPLY=($(compgen -W "soft full strict" -- "$cur"))
return
;;
--pid-file | --image-path | --work-path | --bundle | -b)
case "$cur" in
*:*) ;; # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
'')
COMPREPLY=($(compgen -W '/' -- "$cur"))
__runc_nospace
;;
/*)
_filedir
__runc_nospace
;;
esac
return
;;
$(__runc_to_extglob "$options_with_args"))
return
;;
esac
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$all_options" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_resume() {
local boolean_options="
--help
-h
"
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_state() {
local boolean_options="
--help
-h
"
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_start() {
local boolean_options="
--help
-h
"
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc_update() {
local boolean_options="
--help
"
local options_with_args="
--blkio-weight
--cpu-period
--cpu-quota
--cpu-rt-period
--cpu-rt-runtime
--cpu-share
--cpuset-cpus
--cpuset-mems
--kernel-memory
--kernel-memory-tcp
--memory
--memory-reservation
--memory-swap
"
case "$prev" in
$(__runc_to_extglob "$options_with_args"))
return
;;
esac
case "$cur" in
-*)
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
__runc_list_all
;;
esac
}
_runc() {
local previous_extglob_setting=$(shopt -p extglob)
shopt -s extglob
local commands=(
checkpoint
create
delete
events
exec
init
kill
list
pause
ps
restore
resume
run
spec
start
state
update
help
h
)
# These options are valid as global options for all runc commands
local global_boolean_options="
--help -h
--version -v
"
COMPREPLY=()
local cur prev words cword
_get_comp_words_by_ref -n : cur prev words cword
local command='runc' command_pos=0 subcommand_pos
local counter=1
while [ $counter -lt $cword ]; do
case "${words[$counter]}" in
-*) ;;
=)
((counter++))
;;
*)
command="${words[$counter]}"
command_pos=$counter
break
;;
esac
((counter++))
done
local completions_func=_runc_${command}
declare -F $completions_func >/dev/null && $completions_func
eval "$previous_extglob_setting"
return 0
}
eval "$__runc_previous_extglob_setting"
unset __runc_previous_extglob_setting
complete -F _runc runc

View file

@@ -1,104 +0,0 @@
package devices
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"github.com/opencontainers/runc/libcontainer/configs"
"golang.org/x/sys/unix"
)
var (
ErrNotADevice = errors.New("not a device node")
)
// Testing dependencies
var (
unixLstat = unix.Lstat
ioutilReadDir = ioutil.ReadDir
)
// Given the path to a device and its cgroup_permissions (which cannot be easily queried), look up the information about a Linux device and return it as a Device struct.
func DeviceFromPath(path, permissions string) (*configs.Device, error) {
var stat unix.Stat_t
err := unixLstat(path, &stat)
if err != nil {
return nil, err
}
var (
devNumber = stat.Rdev
major = unix.Major(devNumber)
)
if major == 0 {
return nil, ErrNotADevice
}
var (
devType rune
mode = stat.Mode
)
switch {
case mode&unix.S_IFBLK == unix.S_IFBLK:
devType = 'b'
case mode&unix.S_IFCHR == unix.S_IFCHR:
devType = 'c'
}
return &configs.Device{
Type: devType,
Path: path,
Major: int64(major),
Minor: int64(unix.Minor(devNumber)),
Permissions: permissions,
FileMode: os.FileMode(mode),
Uid: stat.Uid,
Gid: stat.Gid,
}, nil
}
func HostDevices() ([]*configs.Device, error) {
return getDevices("/dev")
}
func getDevices(path string) ([]*configs.Device, error) {
files, err := ioutilReadDir(path)
if err != nil {
return nil, err
}
out := []*configs.Device{}
for _, f := range files {
switch {
case f.IsDir():
switch f.Name() {
// ".lxc" & ".lxd-mounts" added to address https://github.com/lxc/lxd/issues/2825
case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts":
continue
default:
sub, err := getDevices(filepath.Join(path, f.Name()))
if err != nil {
return nil, err
}
out = append(out, sub...)
continue
}
case f.Name() == "console":
continue
}
device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm")
if err != nil {
if err == ErrNotADevice {
continue
}
if os.IsNotExist(err) {
continue
}
return nil, err
}
out = append(out, device)
}
return out, nil
}

View file

@@ -1,63 +0,0 @@
package devices
import (
"errors"
"os"
"testing"
"golang.org/x/sys/unix"
)
func TestDeviceFromPathLstatFailure(t *testing.T) {
testError := errors.New("test error")
// Override unix.Lstat to inject error.
unixLstat = func(path string, stat *unix.Stat_t) error {
return testError
}
_, err := DeviceFromPath("", "")
if err != testError {
t.Fatalf("Unexpected error %v, expected %v", err, testError)
}
}
func TestHostDevicesIoutilReadDirFailure(t *testing.T) {
testError := errors.New("test error")
// Override ioutil.ReadDir to inject error.
ioutilReadDir = func(dirname string) ([]os.FileInfo, error) {
return nil, testError
}
_, err := HostDevices()
if err != testError {
t.Fatalf("Unexpected error %v, expected %v", err, testError)
}
}
func TestHostDevicesIoutilReadDirDeepFailure(t *testing.T) {
testError := errors.New("test error")
called := false
// Override ioutil.ReadDir to inject error after the first call.
ioutilReadDir = func(dirname string) ([]os.FileInfo, error) {
if called {
return nil, testError
}
called = true
// Provoke a second call.
fi, err := os.Lstat("/tmp")
if err != nil {
t.Fatalf("Unexpected error %v", err)
}
return []os.FileInfo{fi}, nil
}
_, err := HostDevices()
if err != testError {
t.Fatalf("Unexpected error %v, expected %v", err, testError)
}
}

View file

@@ -1,255 +0,0 @@
package integration
import (
"bufio"
"bytes"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/configs"
"golang.org/x/sys/unix"
)
func showFile(t *testing.T, fname string) error {
t.Logf("=== %s ===\n", fname)
f, err := os.Open(fname)
if err != nil {
t.Log(err)
return err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
t.Log(scanner.Text())
}
if err := scanner.Err(); err != nil {
return err
}
t.Logf("=== END ===\n")
return nil
}
func TestUsernsCheckpoint(t *testing.T) {
if _, err := os.Stat("/proc/self/ns/user"); os.IsNotExist(err) {
t.Skip("userns is unsupported")
}
cmd := exec.Command("criu", "check", "--feature", "userns")
if err := cmd.Run(); err != nil {
t.Skip("Unable to c/r a container with userns")
}
testCheckpoint(t, true)
}
func TestCheckpoint(t *testing.T) {
testCheckpoint(t, false)
}
func testCheckpoint(t *testing.T, userns bool) {
if testing.Short() {
return
}
root, err := newTestRoot()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.Mounts = append(config.Mounts, &configs.Mount{
Destination: "/sys/fs/cgroup",
Device: "cgroup",
Flags: defaultMountFlags | unix.MS_RDONLY,
})
if userns {
config.UidMappings = []configs.IDMap{{HostID: 0, ContainerID: 0, Size: 1000}}
config.GidMappings = []configs.IDMap{{HostID: 0, ContainerID: 0, Size: 1000}}
config.Namespaces = append(config.Namespaces, configs.Namespace{Type: configs.NEWUSER})
}
factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
if err != nil {
t.Fatal(err)
}
container, err := factory.Create("test", config)
if err != nil {
t.Fatal(err)
}
defer container.Destroy()
stdinR, stdinW, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
var stdout bytes.Buffer
pconfig := libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
Stdout: &stdout,
}
err = container.Run(&pconfig)
stdinR.Close()
defer stdinW.Close()
if err != nil {
t.Fatal(err)
}
pid, err := pconfig.Pid()
if err != nil {
t.Fatal(err)
}
process, err := os.FindProcess(pid)
if err != nil {
t.Fatal(err)
}
parentDir, err := ioutil.TempDir("", "criu-parent")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(parentDir)
preDumpOpts := &libcontainer.CriuOpts{
ImagesDirectory: parentDir,
WorkDirectory: parentDir,
PreDump: true,
}
preDumpLog := filepath.Join(preDumpOpts.WorkDirectory, "dump.log")
if err := container.Checkpoint(preDumpOpts); err != nil {
showFile(t, preDumpLog)
t.Fatal(err)
}
state, err := container.Status()
if err != nil {
t.Fatal(err)
}
if state != libcontainer.Running {
t.Fatal("Unexpected preDump state: ", state)
}
imagesDir, err := ioutil.TempDir("", "criu")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(imagesDir)
checkpointOpts := &libcontainer.CriuOpts{
ImagesDirectory: imagesDir,
WorkDirectory: imagesDir,
ParentImage: "../criu-parent",
}
dumpLog := filepath.Join(checkpointOpts.WorkDirectory, "dump.log")
restoreLog := filepath.Join(checkpointOpts.WorkDirectory, "restore.log")
if err := container.Checkpoint(checkpointOpts); err != nil {
showFile(t, dumpLog)
t.Fatal(err)
}
state, err = container.Status()
if err != nil {
t.Fatal(err)
}
if state != libcontainer.Stopped {
t.Fatal("Unexpected state checkpoint: ", state)
}
stdinW.Close()
_, err = process.Wait()
if err != nil {
t.Fatal(err)
}
// reload the container
container, err = factory.Load("test")
if err != nil {
t.Fatal(err)
}
restoreStdinR, restoreStdinW, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
restoreProcessConfig := &libcontainer.Process{
Cwd: "/",
Stdin: restoreStdinR,
Stdout: &stdout,
}
err = container.Restore(restoreProcessConfig, checkpointOpts)
restoreStdinR.Close()
defer restoreStdinW.Close()
if err != nil {
showFile(t, restoreLog)
t.Fatal(err)
}
state, err = container.Status()
if err != nil {
t.Fatal(err)
}
if state != libcontainer.Running {
t.Fatal("Unexpected restore state: ", state)
}
pid, err = restoreProcessConfig.Pid()
if err != nil {
t.Fatal(err)
}
process, err = os.FindProcess(pid)
if err != nil {
t.Fatal(err)
}
_, err = restoreStdinW.WriteString("Hello!")
if err != nil {
t.Fatal(err)
}
restoreStdinW.Close()
s, err := process.Wait()
if err != nil {
t.Fatal(err)
}
if !s.Success() {
t.Fatal(s.String(), pid)
}
output := string(stdout.Bytes())
if !strings.Contains(output, "Hello!") {
t.Fatal("Did not restore the pipe correctly:", output)
}
}

View file

@@ -1,2 +0,0 @@
// integration is used for integration testing of libcontainer
package integration

File diff suppressed because it is too large

View file

@@ -1,597 +0,0 @@
package integration
import (
"bytes"
"fmt"
"io"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/containerd/console"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/utils"
"golang.org/x/sys/unix"
)
func TestExecIn(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Run(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
buffers := newStdBuffers()
ps := &libcontainer.Process{
Cwd: "/",
Args: []string{"ps"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Run(ps)
ok(t, err)
waitProcess(ps, t)
stdinW.Close()
waitProcess(process, t)
out := buffers.Stdout.String()
if !strings.Contains(out, "cat") || !strings.Contains(out, "ps") {
t.Fatalf("unexpected running process, output %q", out)
}
if strings.Contains(out, "\r") {
t.Fatalf("unexpected carriage-return in output")
}
}
func TestExecInUsernsRlimit(t *testing.T) {
if _, err := os.Stat("/proc/self/ns/user"); os.IsNotExist(err) {
t.Skip("userns is unsupported")
}
testExecInRlimit(t, true)
}
func TestExecInRlimit(t *testing.T) {
testExecInRlimit(t, false)
}
func testExecInRlimit(t *testing.T, userns bool) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
if userns {
config.UidMappings = []configs.IDMap{{HostID: 0, ContainerID: 0, Size: 1000}}
config.GidMappings = []configs.IDMap{{HostID: 0, ContainerID: 0, Size: 1000}}
config.Namespaces = append(config.Namespaces, configs.Namespace{Type: configs.NEWUSER})
}
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Run(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
buffers := newStdBuffers()
ps := &libcontainer.Process{
Cwd: "/",
Args: []string{"/bin/sh", "-c", "ulimit -n"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
Rlimits: []configs.Rlimit{
// increase process rlimit higher than container rlimit to test per-process limit
{Type: unix.RLIMIT_NOFILE, Hard: 1026, Soft: 1026},
},
}
err = container.Run(ps)
ok(t, err)
waitProcess(ps, t)
stdinW.Close()
waitProcess(process, t)
out := buffers.Stdout.String()
if limit := strings.TrimSpace(out); limit != "1026" {
t.Fatalf("expected rlimit to be 1026, got %s", limit)
}
}
func TestExecInAdditionalGroups(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Run(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
var stdout bytes.Buffer
pconfig := libcontainer.Process{
Cwd: "/",
Args: []string{"sh", "-c", "id", "-Gn"},
Env: standardEnvironment,
Stdin: nil,
Stdout: &stdout,
AdditionalGroups: []string{"plugdev", "audio"},
}
err = container.Run(&pconfig)
ok(t, err)
// Wait for process
waitProcess(&pconfig, t)
stdinW.Close()
waitProcess(process, t)
outputGroups := string(stdout.Bytes())
// Check that the groups output has the groups that we specified
if !strings.Contains(outputGroups, "audio") {
t.Fatalf("Listed groups do not contain the audio group as expected: %v", outputGroups)
}
if !strings.Contains(outputGroups, "plugdev") {
t.Fatalf("Listed groups do not contain the plugdev group as expected: %v", outputGroups)
}
}
func TestExecInError(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Run(process)
stdinR.Close()
defer func() {
stdinW.Close()
if _, err := process.Wait(); err != nil {
t.Log(err)
}
}()
ok(t, err)
for i := 0; i < 42; i++ {
var out bytes.Buffer
unexistent := &libcontainer.Process{
Cwd: "/",
Args: []string{"unexistent"},
Env: standardEnvironment,
Stderr: &out,
}
err = container.Run(unexistent)
if err == nil {
t.Fatal("Should be an error")
}
if !strings.Contains(err.Error(), "executable file not found") {
t.Fatalf("Should be error about not found executable, got %s", err)
}
if !bytes.Contains(out.Bytes(), []byte("executable file not found")) {
t.Fatalf("executable file not found error not delivered to stdio:\n%s", out.String())
}
}
}
func TestExecInTTY(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Run(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
var stdout bytes.Buffer
ps := &libcontainer.Process{
Cwd: "/",
Args: []string{"ps"},
Env: standardEnvironment,
}
parent, child, err := utils.NewSockPair("console")
if err != nil {
ok(t, err)
}
defer parent.Close()
defer child.Close()
ps.ConsoleSocket = child
type cdata struct {
c console.Console
err error
}
dc := make(chan *cdata, 1)
go func() {
f, err := utils.RecvFd(parent)
if err != nil {
dc <- &cdata{
err: err,
}
return
}
c, err := console.ConsoleFromFile(f)
if err != nil {
dc <- &cdata{
err: err,
}
return
}
console.ClearONLCR(c.Fd())
dc <- &cdata{
c: c,
}
}()
err = container.Run(ps)
ok(t, err)
data := <-dc
if data.err != nil {
ok(t, data.err)
}
console := data.c
copy := make(chan struct{})
go func() {
io.Copy(&stdout, console)
close(copy)
}()
ok(t, err)
select {
case <-time.After(5 * time.Second):
t.Fatal("Waiting for copy timed out")
case <-copy:
}
waitProcess(ps, t)
stdinW.Close()
waitProcess(process, t)
out := stdout.String()
if !strings.Contains(out, "cat") || !strings.Contains(out, "ps") {
t.Fatalf("unexpected running process, output %q", out)
}
if strings.Contains(out, "\r") {
t.Fatalf("unexpected carriage-return in output")
}
}
func TestExecInEnvironment(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Run(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
buffers := newStdBuffers()
process2 := &libcontainer.Process{
Cwd: "/",
Args: []string{"env"},
Env: []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"DEBUG=true",
"DEBUG=false",
"ENV=test",
},
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Run(process2)
ok(t, err)
waitProcess(process2, t)
stdinW.Close()
waitProcess(process, t)
out := buffers.Stdout.String()
// check execin's process environment
if !strings.Contains(out, "DEBUG=false") ||
!strings.Contains(out, "ENV=test") ||
!strings.Contains(out, "HOME=/root") ||
!strings.Contains(out, "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin") ||
strings.Contains(out, "DEBUG=true") {
t.Fatalf("unexpected running process, output %q", out)
}
}
func TestExecinPassExtraFiles(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
if err != nil {
t.Fatal(err)
}
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Run(process)
stdinR.Close()
defer stdinW.Close()
if err != nil {
t.Fatal(err)
}
var stdout bytes.Buffer
pipeout1, pipein1, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
pipeout2, pipein2, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
inprocess := &libcontainer.Process{
Cwd: "/",
Args: []string{"sh", "-c", "cd /proc/$$/fd; echo -n *; echo -n 1 >3; echo -n 2 >4"},
Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
ExtraFiles: []*os.File{pipein1, pipein2},
Stdin: nil,
Stdout: &stdout,
}
err = container.Run(inprocess)
if err != nil {
t.Fatal(err)
}
waitProcess(inprocess, t)
stdinW.Close()
waitProcess(process, t)
out := string(stdout.Bytes())
// fd 5 is the directory handle for /proc/$$/fd
if out != "0 1 2 3 4 5" {
t.Fatalf("expected to have the file descriptors '0 1 2 3 4 5' passed to exec, got '%s'", out)
}
var buf = []byte{0}
_, err = pipeout1.Read(buf)
if err != nil {
t.Fatal(err)
}
out1 := string(buf)
if out1 != "1" {
t.Fatalf("expected first pipe to receive '1', got '%s'", out1)
}
_, err = pipeout2.Read(buf)
if err != nil {
t.Fatal(err)
}
out2 := string(buf)
if out2 != "2" {
t.Fatalf("expected second pipe to receive '2', got '%s'", out2)
}
}
func TestExecInOomScoreAdj(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.OomScoreAdj = 200
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Run(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
buffers := newStdBuffers()
ps := &libcontainer.Process{
Cwd: "/",
Args: []string{"/bin/sh", "-c", "cat /proc/self/oom_score_adj"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Run(ps)
ok(t, err)
waitProcess(ps, t)
stdinW.Close()
waitProcess(process, t)
out := buffers.Stdout.String()
if oomScoreAdj := strings.TrimSpace(out); oomScoreAdj != strconv.Itoa(config.OomScoreAdj) {
t.Fatalf("expected oomScoreAdj to be %d, got %s", config.OomScoreAdj, oomScoreAdj)
}
}
func TestExecInUserns(t *testing.T) {
if _, err := os.Stat("/proc/self/ns/user"); os.IsNotExist(err) {
t.Skip("userns is unsupported")
}
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.UidMappings = []configs.IDMap{{HostID: 0, ContainerID: 0, Size: 1000}}
config.GidMappings = []configs.IDMap{{HostID: 0, ContainerID: 0, Size: 1000}}
config.Namespaces = append(config.Namespaces, configs.Namespace{Type: configs.NEWUSER})
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Run(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
initPID, err := process.Pid()
ok(t, err)
initUserns, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/user", initPID))
ok(t, err)
buffers := newStdBuffers()
process2 := &libcontainer.Process{
Cwd: "/",
Args: []string{"readlink", "/proc/self/ns/user"},
Env: []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
},
Stdout: buffers.Stdout,
Stderr: os.Stderr,
}
err = container.Run(process2)
ok(t, err)
waitProcess(process2, t)
stdinW.Close()
waitProcess(process, t)
if out := strings.TrimSpace(buffers.Stdout.String()); out != initUserns {
t.Errorf("execin userns(%s), wanted %s", out, initUserns)
}
}

View file

@@ -1,61 +0,0 @@
package integration
import (
"os"
"runtime"
"testing"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
_ "github.com/opencontainers/runc/libcontainer/nsenter"
"github.com/sirupsen/logrus"
)
// init runs the libcontainer initialization code when the test binary is
// re-executed busybox-style as "init", working around the Go runtime's issues with forking
func init() {
if len(os.Args) < 2 || os.Args[1] != "init" {
return
}
runtime.GOMAXPROCS(1)
runtime.LockOSThread()
factory, err := libcontainer.New("")
if err != nil {
logrus.Fatalf("unable to initialize for container: %s", err)
}
if err := factory.StartInitialization(); err != nil {
logrus.Fatal(err)
}
}
var (
factory libcontainer.Factory
systemdFactory libcontainer.Factory
)
func TestMain(m *testing.M) {
var (
err error
ret int
)
logrus.SetOutput(os.Stderr)
logrus.SetLevel(logrus.InfoLevel)
factory, err = libcontainer.New("/run/libctTests", libcontainer.Cgroupfs)
if err != nil {
logrus.Error(err)
os.Exit(1)
}
if systemd.UseSystemd() {
systemdFactory, err = libcontainer.New("/run/libctTests", libcontainer.SystemdCgroups)
if err != nil {
logrus.Error(err)
os.Exit(1)
}
}
ret = m.Run()
os.Exit(ret)
}

View file

@@ -1,419 +0,0 @@
// +build linux,cgo,seccomp
package integration
import (
"strings"
"syscall"
"testing"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/configs"
libseccomp "github.com/seccomp/libseccomp-golang"
)
func TestSeccompDenyGetcwd(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.Seccomp = &configs.Seccomp{
DefaultAction: configs.Allow,
Syscalls: []*configs.Syscall{
{
Name: "getcwd",
Action: configs.Errno,
},
},
}
container, err := newContainer(config)
if err != nil {
t.Fatal(err)
}
defer container.Destroy()
buffers := newStdBuffers()
pwd := &libcontainer.Process{
Cwd: "/",
Args: []string{"pwd"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Run(pwd)
if err != nil {
t.Fatal(err)
}
ps, err := pwd.Wait()
if err == nil {
t.Fatal("Expecting error (negative return code); instead exited cleanly!")
}
var exitCode int
status := ps.Sys().(syscall.WaitStatus)
if status.Exited() {
exitCode = status.ExitStatus()
} else if status.Signaled() {
exitCode = -int(status.Signal())
} else {
t.Fatalf("Unrecognized exit reason!")
}
if exitCode == 0 {
t.Fatalf("Getcwd should fail with negative exit code, instead got %d!", exitCode)
}
expected := "pwd: getcwd: Operation not permitted"
actual := strings.Trim(buffers.Stderr.String(), "\n")
if actual != expected {
t.Fatalf("Expected output %s but got %s\n", expected, actual)
}
}
func TestSeccompPermitWriteConditional(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.Seccomp = &configs.Seccomp{
DefaultAction: configs.Allow,
Syscalls: []*configs.Syscall{
{
Name: "write",
Action: configs.Errno,
Args: []*configs.Arg{
{
Index: 0,
Value: 2,
Op: configs.EqualTo,
},
},
},
},
}
container, err := newContainer(config)
if err != nil {
t.Fatal(err)
}
defer container.Destroy()
buffers := newStdBuffers()
dmesg := &libcontainer.Process{
Cwd: "/",
Args: []string{"busybox", "ls", "/"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Run(dmesg)
if err != nil {
t.Fatal(err)
}
if _, err := dmesg.Wait(); err != nil {
t.Fatalf("%s: %s", err, buffers.Stderr)
}
}
func TestSeccompDenyWriteConditional(t *testing.T) {
if testing.Short() {
return
}
// Only test if library version is v2.2.1 or higher
// Conditional filtering will always error in v2.2.0 and lower
major, minor, micro := libseccomp.GetLibraryVersion()
if (major == 2 && minor < 2) || (major == 2 && minor == 2 && micro < 1) {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.Seccomp = &configs.Seccomp{
DefaultAction: configs.Allow,
Syscalls: []*configs.Syscall{
{
Name: "write",
Action: configs.Errno,
Args: []*configs.Arg{
{
Index: 0,
Value: 2,
Op: configs.EqualTo,
},
},
},
},
}
container, err := newContainer(config)
if err != nil {
t.Fatal(err)
}
defer container.Destroy()
buffers := newStdBuffers()
dmesg := &libcontainer.Process{
Cwd: "/",
Args: []string{"busybox", "ls", "does_not_exist"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Run(dmesg)
if err != nil {
t.Fatal(err)
}
ps, err := dmesg.Wait()
if err == nil {
t.Fatal("Expecting negative return, instead got 0!")
}
var exitCode int
status := ps.Sys().(syscall.WaitStatus)
if status.Exited() {
exitCode = status.ExitStatus()
} else if status.Signaled() {
exitCode = -int(status.Signal())
} else {
t.Fatalf("Unrecognized exit reason!")
}
if exitCode == 0 {
t.Fatalf("Busybox should fail with negative exit code, instead got %d!", exitCode)
}
// We're denying write to stderr, so we expect an empty buffer
expected := ""
actual := strings.Trim(buffers.Stderr.String(), "\n")
if actual != expected {
t.Fatalf("Expected output %s but got %s\n", expected, actual)
}
}
func TestSeccompPermitWriteMultipleConditions(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.Seccomp = &configs.Seccomp{
DefaultAction: configs.Allow,
Syscalls: []*configs.Syscall{
{
Name: "write",
Action: configs.Errno,
Args: []*configs.Arg{
{
Index: 0,
Value: 2,
Op: configs.EqualTo,
},
{
Index: 2,
Value: 0,
Op: configs.NotEqualTo,
},
},
},
},
}
buffers, exitCode, err := runContainer(config, "", "ls", "/")
if err != nil {
t.Fatalf("%s: %s", buffers, err)
}
if exitCode != 0 {
t.Fatalf("exit code not 0. code %d buffers %s", exitCode, buffers)
}
// We don't need to verify the actual thing printed
// Just that something was written to stdout
if len(buffers.Stdout.String()) == 0 {
t.Fatalf("Nothing was written to stdout, write call failed!\n")
}
}
func TestSeccompDenyWriteMultipleConditions(t *testing.T) {
if testing.Short() {
return
}
// Only test if library version is v2.2.1 or higher
// Conditional filtering will always error in v2.2.0 and lower
major, minor, micro := libseccomp.GetLibraryVersion()
if (major == 2 && minor < 2) || (major == 2 && minor == 2 && micro < 1) {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.Seccomp = &configs.Seccomp{
DefaultAction: configs.Allow,
Syscalls: []*configs.Syscall{
{
Name: "write",
Action: configs.Errno,
Args: []*configs.Arg{
{
Index: 0,
Value: 2,
Op: configs.EqualTo,
},
{
Index: 2,
Value: 0,
Op: configs.NotEqualTo,
},
},
},
},
}
buffers, exitCode, err := runContainer(config, "", "ls", "/does_not_exist")
if err == nil {
t.Fatalf("Expecting error return, instead got 0")
}
if exitCode == 0 {
t.Fatalf("Busybox should fail with negative exit code, instead got %d!", exitCode)
}
expected := ""
actual := strings.Trim(buffers.Stderr.String(), "\n")
if actual != expected {
t.Fatalf("Expected output %s but got %s\n", expected, actual)
}
}
func TestSeccompMultipleConditionSameArgDeniesStdout(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
// Prevent writing to both stdout and stderr
config := newTemplateConfig(rootfs)
config.Seccomp = &configs.Seccomp{
DefaultAction: configs.Allow,
Syscalls: []*configs.Syscall{
{
Name: "write",
Action: configs.Errno,
Args: []*configs.Arg{
{
Index: 0,
Value: 1,
Op: configs.EqualTo,
},
{
Index: 0,
Value: 2,
Op: configs.EqualTo,
},
},
},
},
}
buffers, exitCode, err := runContainer(config, "", "ls", "/")
if err != nil {
t.Fatalf("%s: %s", buffers, err)
}
if exitCode != 0 {
t.Fatalf("exit code not 0. code %d buffers %s", exitCode, buffers)
}
// Verify that nothing was printed
if len(buffers.Stdout.String()) != 0 {
t.Fatalf("Something was written to stdout, write call succeeded!\n")
}
}
func TestSeccompMultipleConditionSameArgDeniesStderr(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
// Prevent writing to both stdout and stderr
config := newTemplateConfig(rootfs)
config.Seccomp = &configs.Seccomp{
DefaultAction: configs.Allow,
Syscalls: []*configs.Syscall{
{
Name: "write",
Action: configs.Errno,
Args: []*configs.Arg{
{
Index: 0,
Value: 1,
Op: configs.EqualTo,
},
{
Index: 0,
Value: 2,
Op: configs.EqualTo,
},
},
},
},
}
buffers, exitCode, err := runContainer(config, "", "ls", "/does_not_exist")
if err == nil {
t.Fatalf("Expecting error return, instead got 0")
}
if exitCode == 0 {
t.Fatalf("Busybox should fail with negative exit code, instead got %d!", exitCode)
}
// Verify nothing was printed
if len(buffers.Stderr.String()) != 0 {
t.Fatalf("Something was written to stderr, write call succeeded!\n")
}
}

View file

@@ -1,191 +0,0 @@
package integration
import (
"github.com/opencontainers/runc/libcontainer/configs"
"golang.org/x/sys/unix"
)
var standardEnvironment = []string{
"HOME=/root",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOSTNAME=integration",
"TERM=xterm",
}
const defaultMountFlags = unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV
// newTemplateConfig returns a base template for running a container
//
// it uses a network strategy of just setting a loopback interface
// and the default setup for devices
func newTemplateConfig(rootfs string) *configs.Config {
allowAllDevices := false
return &configs.Config{
Rootfs: rootfs,
Capabilities: &configs.Capabilities{
Bounding: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
Permitted: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
Inheritable: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
Ambient: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
Effective: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
},
Namespaces: configs.Namespaces([]configs.Namespace{
{Type: configs.NEWNS},
{Type: configs.NEWUTS},
{Type: configs.NEWIPC},
{Type: configs.NEWPID},
{Type: configs.NEWNET},
}),
Cgroups: &configs.Cgroup{
Path: "integration/test",
Resources: &configs.Resources{
MemorySwappiness: nil,
AllowAllDevices: &allowAllDevices,
AllowedDevices: configs.DefaultAllowedDevices,
},
},
MaskPaths: []string{
"/proc/kcore",
"/sys/firmware",
},
ReadonlyPaths: []string{
"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
},
Devices: configs.DefaultAutoCreatedDevices,
Hostname: "integration",
Mounts: []*configs.Mount{
{
Source: "proc",
Destination: "/proc",
Device: "proc",
Flags: defaultMountFlags,
},
{
Source: "tmpfs",
Destination: "/dev",
Device: "tmpfs",
Flags: unix.MS_NOSUID | unix.MS_STRICTATIME,
Data: "mode=755",
},
{
Source: "devpts",
Destination: "/dev/pts",
Device: "devpts",
Flags: unix.MS_NOSUID | unix.MS_NOEXEC,
Data: "newinstance,ptmxmode=0666,mode=0620,gid=5",
},
{
Device: "tmpfs",
Source: "shm",
Destination: "/dev/shm",
Data: "mode=1777,size=65536k",
Flags: defaultMountFlags,
},
/*
CI is broken on the debian based kernels with this
{
Source: "mqueue",
Destination: "/dev/mqueue",
Device: "mqueue",
Flags: defaultMountFlags,
},
*/
{
Source: "sysfs",
Destination: "/sys",
Device: "sysfs",
Flags: defaultMountFlags | unix.MS_RDONLY,
},
},
Networks: []*configs.Network{
{
Type: "loopback",
Address: "127.0.0.1/0",
Gateway: "localhost",
},
},
Rlimits: []configs.Rlimit{
{
Type: unix.RLIMIT_NOFILE,
Hard: uint64(1025),
Soft: uint64(1025),
},
},
}
}

View file

@@ -1,170 +0,0 @@
package integration
import (
"bytes"
"crypto/md5"
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"syscall"
"testing"
"time"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/configs"
)
func newStdBuffers() *stdBuffers {
return &stdBuffers{
Stdin: bytes.NewBuffer(nil),
Stdout: bytes.NewBuffer(nil),
Stderr: bytes.NewBuffer(nil),
}
}
type stdBuffers struct {
Stdin *bytes.Buffer
Stdout *bytes.Buffer
Stderr *bytes.Buffer
}
func (b *stdBuffers) String() string {
s := []string{}
if b.Stderr != nil {
s = append(s, b.Stderr.String())
}
if b.Stdout != nil {
s = append(s, b.Stdout.String())
}
return strings.Join(s, "|")
}
// ok fails the test if an err is not nil.
func ok(t testing.TB, err error) {
if err != nil {
_, file, line, _ := runtime.Caller(1)
t.Fatalf("%s:%d: unexpected error: %s\n\n", filepath.Base(file), line, err.Error())
}
}
func waitProcess(p *libcontainer.Process, t *testing.T) {
_, file, line, _ := runtime.Caller(1)
status, err := p.Wait()
if err != nil {
t.Fatalf("%s:%d: unexpected error: %s\n\n", filepath.Base(file), line, err.Error())
}
if !status.Success() {
t.Fatalf("%s:%d: unexpected status: %s\n\n", filepath.Base(file), line, status.String())
}
}
func newTestRoot() (string, error) {
dir, err := ioutil.TempDir("", "libcontainer")
if err != nil {
return "", err
}
if err := os.MkdirAll(dir, 0700); err != nil {
return "", err
}
return dir, nil
}
func newTestBundle() (string, error) {
dir, err := ioutil.TempDir("", "bundle")
if err != nil {
return "", err
}
if err := os.MkdirAll(dir, 0700); err != nil {
return "", err
}
return dir, nil
}
// newRootfs creates a new tmp directory and copies the busybox root filesystem
func newRootfs() (string, error) {
dir, err := ioutil.TempDir("", "")
if err != nil {
return "", err
}
if err := os.MkdirAll(dir, 0700); err != nil {
return "", err
}
if err := copyBusybox(dir); err != nil {
return "", err
}
return dir, nil
}
func remove(dir string) {
os.RemoveAll(dir)
}
// copyBusybox copies the rootfs for a busybox container created for the test image
// into the new directory for the specific test
func copyBusybox(dest string) error {
out, err := exec.Command("sh", "-c", fmt.Sprintf("cp -a /busybox/* %s/", dest)).CombinedOutput()
if err != nil {
return fmt.Errorf("copy error %q: %q", err, out)
}
return nil
}
func newContainer(config *configs.Config) (libcontainer.Container, error) {
h := md5.New()
h.Write([]byte(time.Now().String()))
return newContainerWithName(hex.EncodeToString(h.Sum(nil)), config)
}
func newContainerWithName(name string, config *configs.Config) (libcontainer.Container, error) {
f := factory
if config.Cgroups != nil && config.Cgroups.Parent == "system.slice" {
f = systemdFactory
}
return f.Create(name, config)
}
// runContainer runs the container with the specific config and arguments
//
// buffers are returned containing the STDOUT and STDERR output for the run
// along with the exit code and any go error
func runContainer(config *configs.Config, console string, args ...string) (buffers *stdBuffers, exitCode int, err error) {
container, err := newContainer(config)
if err != nil {
return nil, -1, err
}
defer container.Destroy()
buffers = newStdBuffers()
process := &libcontainer.Process{
Cwd: "/",
Args: args,
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Run(process)
if err != nil {
return buffers, -1, err
}
ps, err := process.Wait()
if err != nil {
return buffers, -1, err
}
status := ps.Sys().(syscall.WaitStatus)
if status.Exited() {
exitCode = status.ExitStatus()
} else if status.Signaled() {
exitCode = -int(status.Signal())
} else {
return buffers, -1, err
}
return
}

View file

@@ -1,47 +0,0 @@
Name: cat
State: R (running)
Tgid: 19383
Ngid: 0
Pid: 19383
PPid: 19275
TracerPid: 0
Uid: 1000 1000 1000 1000
Gid: 1000 1000 1000 1000
FDSize: 256
Groups: 24 25 27 29 30 44 46 102 104 108 111 1000 1001
NStgid: 19383
NSpid: 19383
NSpgid: 19383
NSsid: 19275
VmPeak: 5944 kB
VmSize: 5944 kB
VmLck: 0 kB
VmPin: 0 kB
VmHWM: 744 kB
VmRSS: 744 kB
VmData: 324 kB
VmStk: 136 kB
VmExe: 48 kB
VmLib: 1776 kB
VmPTE: 32 kB
VmPMD: 12 kB
VmSwap: 0 kB
Threads: 1
SigQ: 0/30067
SigPnd: 0000000000000000
ShdPnd: 0000000000000000
SigBlk: 0000000000000000
SigIgn: 0000000000000080
SigCgt: 0000000000000000
CapInh: 0000000000000000
CapPrm: 0000000000000000
CapEff: 0000000000000000
CapBnd: 0000003fffffffff
CapAmb: 0000000000000000
Seccomp: 0
Cpus_allowed: f
Cpus_allowed_list: 0-3
Mems_allowed: 00000000,00000001
Mems_allowed_list: 0
voluntary_ctxt_switches: 0
nonvoluntary_ctxt_switches: 1

View file

@@ -67,6 +67,15 @@ func main() {
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir != "" {
root = runtimeDir + "/runc"
+// According to the XDG specification, we need to set anything in
+// XDG_RUNTIME_DIR to have a sticky bit if we don't want it to get
+// auto-pruned.
+if err := os.MkdirAll(root, 0700); err != nil {
+fatal(err)
+}
+if err := os.Chmod(root, 0700|os.ModeSticky); err != nil {
+fatal(err)
+}
}
}
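// As a rough sketch of the intended result (path shown is an example), the
// runtime root should end up with the sticky bit set on top of mode 0700:
//
//	$ ls -ld "$XDG_RUNTIME_DIR/runc"
//	drwx-----T ... /run/user/1000/runc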

View file

@@ -1,11 +0,0 @@
runc man pages
====================
This directory contains man pages for runc in markdown format.
To generate man pages from it, use this command
./md2man-all.sh
You will see man pages generated under the man8 directory.

View file

@@ -1,27 +0,0 @@
#!/bin/bash
set -e
# get into this script's directory
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
[ "$1" = '-q' ] || {
set -x
pwd
}
if ! ( which go-md2man &>/dev/null ); then
echo "To install man pages, please install 'go-md2man'."
exit 0
fi
for FILE in *.md; do
base="$(basename "$FILE")"
name="${base%.md}"
num="${name##*.}"
if [ -z "$num" -o "$name" = "$num" ]; then
# skip files that aren't of the format xxxx.N.md (like README.md)
continue
fi
mkdir -p "./man${num}"
go-md2man -in "$FILE" -out "./man${num}/${name}"
done

View file

@ -1,25 +0,0 @@
# NAME
runc checkpoint - checkpoint a running container
# SYNOPSIS
runc checkpoint [command options] <container-id>
Where "<container-id>" is the name for the instance of the container to be
checkpointed.
# DESCRIPTION
The checkpoint command saves the state of the container instance.
# OPTIONS
--image-path value path for saving criu image files
--work-path value path for saving work files and logs
--parent-path value path for previous criu image files in pre-dump
--leave-running leave the process running after checkpointing
--tcp-established allow open tcp connections
--ext-unix-sk allow external unix sockets
--shell-job allow shell jobs
--page-server value ADDRESS:PORT of the page server
--file-locks handle file locks, for safety
--pre-dump dump container's memory information only, leave the container running after this
--manage-cgroups-mode value cgroups mode: 'soft' (default), 'full' and 'strict'
--empty-ns value create a namespace, but don't restore its properties
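# EXAMPLE
For example, assuming a running container named "ubuntu01" (an illustrative name), the following checkpoints it and leaves it running afterwards:
# runc checkpoint --leave-running ubuntu01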

View file

@ -1,27 +0,0 @@
# NAME
runc create - create a container
# SYNOPSIS
runc create [command options] <container-id>
Where "<container-id>" is your name for the instance of the container that you
are starting. The name you provide for the container instance must be unique on
your host.
# DESCRIPTION
The create command creates an instance of a container for a bundle. The bundle
is a directory with a specification file named "config.json" and a root
filesystem.
The specification file includes an args parameter. The args parameter is used
to specify command(s) that get run when the container is started. To change the
command(s) that get executed on start, edit the args parameter of the spec. See
"runc spec --help" for more explanation.
# OPTIONS
--bundle value, -b value path to the root of the bundle directory, defaults to the current directory
--console-socket value path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal
--pid-file value specify the file to write the process id to
--no-pivot do not use pivot root to jail process inside rootfs. This should be used whenever the rootfs is on top of a ramdisk
--no-new-keyring do not create a new session keyring for the container. This will cause the container to inherit the calling processes session key
--preserve-fds value Pass N additional file descriptors to the container (stdio + $LISTEN_FDS + N in total) (default: 0)
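# EXAMPLE
Assuming a bundle in the current directory, the following creates a container named "container1" (an illustrative name) and then starts its user defined process:
# runc create container1
# runc start container1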

View file

@ -1,17 +0,0 @@
# NAME
runc delete - delete any resources held by the container, often used with detached containers
# SYNOPSIS
runc delete [command options] <container-id>
Where "<container-id>" is the name for the instance of the container.
# OPTIONS
--force, -f Forcibly deletes the container if it is still running (uses SIGKILL)
# EXAMPLE
For example, if the container id is "ubuntu01" and runc list currently shows the
status of "ubuntu01" as "stopped" the following will delete resources held for
"ubuntu01" removing "ubuntu01" from the runc list of containers:
# runc delete ubuntu01

View file

@ -1,15 +0,0 @@
# NAME
runc events - display container events such as OOM notifications, cpu, memory, and IO usage statistics
# SYNOPSIS
runc events [command options] <container-id>
Where "<container-id>" is the name for the instance of the container.
# DESCRIPTION
The events command displays information about the container. By default the
information is displayed once every 5 seconds.
# OPTIONS
--interval value set the stats collection interval (default: 5s)
--stats display the container's stats then exit
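# EXAMPLE
For example, assuming a running container named "ubuntu01" (an illustrative name), the following displays a single set of stats for the container and then exits:
# runc events --stats ubuntu01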

View file

@ -1,30 +0,0 @@
# NAME
runc exec - execute new process inside the container
# SYNOPSIS
runc exec [command options] <container-id> -- <container command> [args...]
Where "<container-id>" is the name for the instance of the container and
"<container command>" is the command to be executed in the container.
# EXAMPLE
For example, if the container is configured to run the linux ps command the
following will output a list of processes running in the container:
# runc exec <container-id> ps
# OPTIONS
--console value specify the pty slave path for use with the container
--cwd value current working directory in the container
--env value, -e value set environment variables
--tty, -t allocate a pseudo-TTY
--user value, -u value UID (format: <uid>[:<gid>])
--additional-gids value, -g value additional gids
--process value, -p value path to the process.json
--detach, -d detach from the container's process
--pid-file value specify the file to write the process id to
--process-label value set the asm process label for the process commonly used with selinux
--apparmor value set the apparmor profile for the process
--no-new-privs set the no new privileges value for the process
--cap value, -c value add a capability to the bounding set for the process
--no-subreaper disable the use of the subreaper used to reap reparented processes

View file

@ -1,18 +0,0 @@
# NAME
runc kill - kill sends the specified signal (default: SIGTERM) to the container's init process
# SYNOPSIS
runc kill [command options] <container-id> <signal>
Where "<container-id>" is the name for the instance of the container and
"<signal>" is the signal to be sent to the init process.
# OPTIONS
--all, -a send the specified signal to all processes inside the container
# EXAMPLE
For example, if the container id is "ubuntu01" the following will send a "KILL"
signal to the init process of the "ubuntu01" container:
# runc kill ubuntu01 KILL

View file

@ -1,19 +0,0 @@
# NAME
runc list - lists containers started by runc with the given root
# SYNOPSIS
runc list [command options]
# EXAMPLE
Where the given root is specified via the global option "--root"
(default: "/run/runc").
To list containers created via the default "--root":
# runc list
To list containers created using a non-default value for "--root":
# runc --root value list
# OPTIONS
--format value, -f value select one of: table or json (default: "table")
--quiet, -q display only container IDs

View file

@ -1,12 +0,0 @@
# NAME
runc pause - pause suspends all processes inside the container
# SYNOPSIS
runc pause <container-id>
Where "<container-id>" is the name for the instance of the container to be
paused.
# DESCRIPTION
The pause command suspends all processes in the instance of the container.
Use runc list to identify instances of containers and their current status.

View file

@ -1,13 +0,0 @@
# NAME
runc ps - ps displays the processes running inside a container
# SYNOPSIS
runc ps [command options] <container-id> [ps options]
# OPTIONS
--format value, -f value select one of: table(default) or json
The default format is table. The following will output the processes of a container
in json format:
# runc ps -f json <container-id>

View file

@ -1,26 +0,0 @@
# NAME
runc restore - restore a container from a previous checkpoint
# SYNOPSIS
runc restore [command options] <container-id>
Where "<container-id>" is the name for the instance of the container to be
restored.
# DESCRIPTION
Restores the saved state of the container instance that was previously saved
using the runc checkpoint command.
# OPTIONS
--image-path value path to criu image files for restoring
--work-path value path for saving work files and logs
--tcp-established allow open tcp connections
--ext-unix-sk allow external unix sockets
--shell-job allow shell jobs
--file-locks handle file locks, for safety
--manage-cgroups-mode value cgroups mode: 'soft' (default), 'full' and 'strict'
--bundle value, -b value path to the root of the bundle directory
--detach, -d detach from the container's process
--pid-file value specify the file to write the process id to
--no-subreaper disable the use of the subreaper used to reap reparented processes
--no-pivot do not use pivot root to jail process inside rootfs. This should be used whenever the rootfs is on top of a ramdisk

View file

@ -1,12 +0,0 @@
# NAME
runc resume - resumes all processes that have been previously paused
# SYNOPSIS
runc resume <container-id>
Where "<container-id>" is the name for the instance of the container to be
resumed.
# DESCRIPTION
The resume command resumes all processes in the instance of the container.
Use runc list to identify instances of containers and their current status.

View file

@ -1,29 +0,0 @@
# NAME
runc run - create and run a container
# SYNOPSIS
runc run [command options] <container-id>
Where "<container-id>" is your name for the instance of the container that you
are starting. The name you provide for the container instance must be unique on
your host.
# DESCRIPTION
The run command creates an instance of a container for a bundle. The bundle
is a directory with a specification file named "config.json" and a root
filesystem.
The specification file includes an args parameter. The args parameter is used
to specify command(s) that get run when the container is started. To change the
command(s) that get executed on start, edit the args parameter of the spec. See
"runc spec --help" for more explanation.
# OPTIONS
--bundle value, -b value path to the root of the bundle directory, defaults to the current directory
--console-socket value path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal
--detach, -d detach from the container's process
--pid-file value specify the file to write the process id to
--no-subreaper disable the use of the subreaper used to reap reparented processes
--no-pivot do not use pivot root to jail process inside rootfs. This should be used whenever the rootfs is on top of a ramdisk
--no-new-keyring do not create a new session keyring for the container. This will cause the container to inherit the calling processes session key
--preserve-fds value Pass N additional file descriptors to the container (stdio + $LISTEN_FDS + N in total) (default: 0)
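# EXAMPLE
Assuming a bundle in the current directory, the following creates and runs a container named "container1" (an illustrative name):
# runc run container1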

View file

@ -1,52 +0,0 @@
# NAME
runc spec - create a new specification file
# SYNOPSIS
runc spec [command options] [arguments...]
# DESCRIPTION
The spec command creates the new specification file named "config.json" for
the bundle.
The spec generated is just a starter file. Editing of the spec is required to
achieve desired results. For example, the newly generated spec includes an args
parameter that is initially set to call the "sh" command when the container is
started. Calling "sh" may work for an ubuntu container or busybox, but will not
work for containers that do not include the "sh" program.
# EXAMPLE
To run docker's hello-world container one needs to set the args parameter
in the spec to call hello. This can be done using the sed command or a text
editor. The following commands create a bundle for hello-world, change the
default args parameter in the spec from "sh" to "/hello", then run the hello
command in a new hello-world container named container1:
mkdir hello
cd hello
docker pull hello-world
docker export $(docker create hello-world) > hello-world.tar
mkdir rootfs
tar -C rootfs -xf hello-world.tar
runc spec
sed -i 's;"sh";"/hello";' config.json
runc start container1
In the start command above, "container1" is the name for the instance of the
container that you are starting. The name you provide for the container instance
must be unique on your host.
An alternative for generating a customized spec config is to use "oci-runtime-tool", the
sub-command "oci-runtime-tool generate" has lots of options that can be used to do any
customizations as you want, see [runtime-tools](https://github.com/opencontainers/runtime-tools)
to get more information.
When starting a container through runc, runc needs root privilege. If not
already running as root, you can use sudo to give runc root privilege. For
example: "sudo runc start container1" will give runc root privilege to start the
container on your host.
Alternatively, you can start a rootless container, which can run without root privileges. For this to work, the specification file needs to be adjusted accordingly. You can pass the parameter --rootless to this command to generate a proper rootless spec file.
# OPTIONS
--bundle value, -b value path to the root of the bundle directory
--rootless generate a configuration for a rootless container

View file

@ -1,12 +0,0 @@
# NAME
runc start - start executes the user defined process in a created container
# SYNOPSIS
runc start <container-id>
Where "<container-id>" is your name for the instance of the container that you
are starting. The name you provide for the container instance must be unique on
your host.
# DESCRIPTION
The start command executes the user defined process in a created container.

View file

@ -1,11 +0,0 @@
# NAME
runc state - output the state of a container
# SYNOPSIS
runc state <container-id>
Where "<container-id>" is your name for the instance of the container.
# DESCRIPTION
The state command outputs current state information for the
instance of a container.

View file

@ -1,51 +0,0 @@
# NAME
runc update - update container resource constraints
# SYNOPSIS
runc update [command options] <container-id>
# DESCRIPTION
The data can be read from a file or the standard input; the
accepted format is as follows (unchanged values can be omitted):
{
"memory": {
"limit": 0,
"reservation": 0,
"swap": 0,
"kernel": 0,
"kernelTCP": 0
},
"cpu": {
"shares": 0,
"quota": 0,
"period": 0,
"realtimeRuntime": 0,
"realtimePeriod": 0,
"cpus": "",
"mems": ""
},
"blockIO": {
"blkioWeight": 0
}
}
Note: if data is to be read from a file or the standard input, all
other options are ignored.
# OPTIONS
--resources value, -r value path to the file containing the resources to update or '-' to read from the standard input
--blkio-weight value Specifies per cgroup weight, range is from 10 to 1000 (default: 0)
--cpu-period value CPU CFS period to be used for hardcapping (in usecs). 0 to use system default
--cpu-quota value CPU CFS hardcap limit (in usecs). Allowed cpu time in a given period
--cpu-rt-period value CPU realtime period to be used for hardcapping (in usecs). 0 to use system default
--cpu-rt-runtime value CPU realtime hardcap limit (in usecs). Allowed cpu time in a given period
--cpu-share value CPU shares (relative weight vs. other containers)
--cpuset-cpus value CPU(s) to use
--cpuset-mems value Memory node(s) to use
--kernel-memory value Kernel memory limit (in bytes)
--kernel-memory-tcp value Kernel memory limit (in bytes) for tcp buffer
--memory value Memory limit (in bytes)
--memory-reservation value Memory reservation or soft_limit (in bytes)
--memory-swap value Total memory usage (memory + swap); set '-1' to enable unlimited swap
--pids-limit value Maximum number of pids allowed in the container (default: 0)
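# EXAMPLE
For example, assuming a running container named "ubuntu01" (the name and value are illustrative), the following sets its memory limit to 1073741824 bytes (1 GiB):
# runc update --memory 1073741824 ubuntu01
The resources can also be supplied in the JSON format shown above, read from a file or from the standard input:
# runc update -r resources.json ubuntu01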

View file

@ -1,57 +0,0 @@
# NAME
runc - Open Container Initiative runtime
# SYNOPSIS
runc [global options] command [command options] [arguments...]
# DESCRIPTION
runc is a command line client for running applications packaged according to
the Open Container Initiative (OCI) format and is a compliant implementation of the
Open Container Initiative specification.
runc integrates well with existing process supervisors to provide a production
container runtime environment for applications. It can be used with your
existing process monitoring tools and the container will be spawned as a
direct child of the process supervisor.
Containers are configured using bundles. A bundle for a container is a directory
that includes a specification file named "config.json" and a root filesystem.
The root filesystem contains the contents of the container.
To start a new instance of a container:
# runc start [ -b bundle ] <container-id>
Where "<container-id>" is your name for the instance of the container that you
are starting. The name you provide for the container instance must be unique on
your host. Providing the bundle directory using "-b" is optional. The default
value for "bundle" is the current directory.
# COMMANDS
checkpoint checkpoint a running container
delete delete any resources held by the container, often used with detached containers
events display container events such as OOM notifications, cpu, memory, IO and network stats
exec execute new process inside the container
init initialize the namespaces and launch the process (do not call it outside of runc)
kill kill sends the specified signal (default: SIGTERM) to the container's init process
list lists containers started by runc with the given root
pause pause suspends all processes inside the container
ps displays the processes running inside a container
restore restore a container from a previous checkpoint
resume resumes all processes that have been previously paused
run create and run a container
spec create a new specification file
start executes the user defined process in a created container
state output the state of a container
update update container resource constraints
help, h Shows a list of commands or help for one command
# GLOBAL OPTIONS
--debug enable debug output for logging
--log value set the log file path where internal debug information is written (default: "/dev/null")
--log-format value set the format used by logs ('text' (default), or 'json') (default: "text")
--root value root directory for storage of container state (this should be located in tmpfs) (default: "/run/runc" or $XDG_RUNTIME_DIR/runc for rootless containers)
--criu value path to the criu binary used for checkpoint and restore (default: "criu")
--systemd-cgroup enable systemd cgroup support, expects cgroupsPath to be of form "slice:prefix:name" for e.g. "system.slice:runc:434234"
--help, -h show help
--version, -v print the version

View file

@ -1,33 +0,0 @@
#!/bin/bash
if [ -z "$VALIDATE_UPSTREAM" ]; then
# this is kind of an expensive check, so let's not do this twice if we
# are running more than one validate bundle script
VALIDATE_REPO='https://github.com/opencontainers/runc.git'
VALIDATE_BRANCH='master'
if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
VALIDATE_BRANCH="${TRAVIS_BRANCH}"
fi
VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
validate_diff() {
if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
git diff "$VALIDATE_COMMIT_DIFF" "$@"
fi
}
validate_log() {
if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
git log "$VALIDATE_COMMIT_LOG" "$@"
fi
}
fi

View file

@ -1,247 +0,0 @@
#!/usr/bin/env bash
set -e
# bits of this were adapted from check_config.sh in docker
# see also https://github.com/docker/docker/blob/master/contrib/check-config.sh
possibleConfigs=(
'/proc/config.gz'
"/boot/config-$(uname -r)"
"/usr/src/linux-$(uname -r)/.config"
'/usr/src/linux/.config'
)
possibleConfigFiles=(
'config.gz'
"config-$(uname -r)"
'.config'
)
if ! command -v zgrep &>/dev/null; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" >/dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" >/dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" >/dev/null
}
color() {
local codes=()
if [ "$1" = 'bold' ]; then
codes=("${codes[@]}" '1')
shift
fi
if [ "$#" -gt 0 ]; then
local code
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes=("${codes[@]}" "$code")
fi
fi
local IFS=';'
echo -en '\033['"${codes[*]}"'m'
}
wrap_color() {
text="$1"
shift
color "$@"
echo -n "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
fi
}
check_flags() {
for flag in "$@"; do
echo "- $(check_flag "$flag")"
done
}
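# Example (illustrative): "check_flags SECCOMP CGROUP_PIDS" prints one line per
# flag, such as "- CONFIG_SECCOMP: enabled" or "- CONFIG_CGROUP_PIDS: missing".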
check_distro_userns() {
source /etc/os-release 2>/dev/null || /bin/true
if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then
# this is a CentOS7 or RHEL7 system
grep -q "user_namespace.enable=1" /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
}
fi
}
is_config() {
local config="$1"
# Todo: more check
[[ -f "$config" ]] && return 0
return 1
}
search_config() {
local target_dir="$1"
[[ "$target_dir" ]] || target_dir=("${possibleConfigs[@]}")
local tryConfig
for tryConfig in "${target_dir[@]}"; do
is_config "$tryConfig" && {
CONFIG="$tryConfig"
return
}
[[ -d "$tryConfig" ]] && {
for tryFile in "${possibleConfigFiles[@]}"; do
is_config "$tryConfig/$tryFile" && {
CONFIG="$tryConfig/$tryFile"
return
}
done
}
done
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
}
CONFIG="$1"
is_config "$CONFIG" || {
if [[ ! "$CONFIG" ]]; then
wrap_color "info: no config specified, searching for kernel config ..." white
search_config
elif [[ -d "$CONFIG" ]]; then
wrap_color "info: input is a directory, searching for kernel config in this directory..." white
search_config "$CONFIG"
else
wrap_warning "warning: $CONFIG seems not a kernel config, searching other paths for kernel config ..."
search_config
fi
}
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
echo -n '- '
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')"
fi
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
echo -n '- '
if command -v apparmor_parser &>/dev/null; then
echo "$(wrap_good 'apparmor' 'enabled and tools installed')"
else
echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')"
echo -n ' '
if command -v apt-get &>/dev/null; then
echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')"
elif command -v yum &>/dev/null; then
echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')"
else
echo "$(wrap_color '(look for an "apparmor" package for your distribution)')"
fi
fi
fi
flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG
KEYS
MACVLAN VETH BRIDGE BRIDGE_NETFILTER
NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE
NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
NF_NAT NF_NAT_NEEDED
# required for bind-mounting /dev/mqueue into containers
POSIX_MQUEUE
)
check_flags "${flags[@]}"
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
check_flags SECCOMP
check_flags CGROUP_PIDS
check_flags MEMCG_SWAP MEMCG_SWAP_ENABLED
if is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(note that cgroup swap accounting is not enabled in your kernel config, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
}
if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -le 5 ]; then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 18 ]; then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 13 ]; then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
flags=(
BLK_CGROUP BLK_DEV_THROTTLING IOSCHED_CFQ CFQ_GROUP_IOSCHED
CGROUP_PERF
CGROUP_HUGETLB
NET_CLS_CGROUP $netprio
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED
)
check_flags "${flags[@]}"

View file

@ -1,130 +0,0 @@
#!/bin/bash
# Copyright (C) 2017 SUSE LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
## --->
# Project-specific options and functions. In *theory* you shouldn't need to
# touch anything else in this script in order to use this elsewhere.
project="runc"
root="$(readlink -f "$(dirname "${BASH_SOURCE}")/..")"
# This function takes an output path as an argument, where the built
# (preferably static) binary should be placed.
function build_project() {
builddir="$(dirname "$1")"
# Build with all tags enabled.
make -C "$root" COMMIT_NO= BUILDTAGS="seccomp selinux apparmor" static
mv "$root/$project" "$1"
}
# End of the easy-to-configure portion.
## <---
# Print usage information.
function usage() {
echo "usage: release.sh [-S <gpg-key-id>] [-c <commit-ish>] [-r <release-dir>] [-v <version>]" >&2
exit 1
}
# Log something to stderr.
function log() {
echo "[*] $*" >&2
}
# Log something to stderr and then exit with 0.
function bail() {
log "$@"
exit 0
}
# Conduct a sanity-check to make sure that GPG provided with the given
# arguments can sign something. Inability to sign things is not a fatal error.
function gpg_cansign() {
gpg "$@" --clear-sign </dev/null >/dev/null
}
# When creating releases we need to build static binaries, an archive of the
# current commit, and generate detached signatures for both.
keyid=""
commit="HEAD"
version=""
releasedir=""
hashcmd=""
while getopts "S:c:r:v:h:" opt; do
case "$opt" in
S)
keyid="$OPTARG"
;;
c)
commit="$OPTARG"
;;
r)
releasedir="$OPTARG"
;;
v)
version="$OPTARG"
;;
h)
hashcmd="$OPTARG"
;;
\:)
echo "Missing argument: -$OPTARG" >&2
usage
;;
\?)
echo "Invalid option: -$OPTARG" >&2
usage
;;
esac
done
version="${version:-$(<"$root/VERSION")}"
releasedir="${releasedir:-release/$version}"
hashcmd="${hashcmd:-sha256sum}"
goarch="$(go env GOARCH || echo "amd64")"
log "creating $project release in '$releasedir'"
log " version: $version"
log " commit: $commit"
log " key: ${keyid:-DEFAULT}"
log " hash: $hashcmd"
# Make explicit what we're doing.
set -x
# Make the release directory.
rm -rf "$releasedir" && mkdir -p "$releasedir"
# Build project.
build_project "$releasedir/$project.$goarch"
# Generate new archive.
git archive --format=tar --prefix="$project-$version/" "$commit" | xz > "$releasedir/$project.tar.xz"
# Generate sha256 checksums for both.
( cd "$releasedir" ; "$hashcmd" "$project".{"$goarch",tar.xz} > "$project.$hashcmd" ; )
# Set up the gpgflags.
[[ "$keyid" ]] && export gpgflags="--default-key $keyid"
gpg_cansign $gpgflags || bail "Could not find suitable GPG key, skipping signing step."
# Sign everything.
gpg $gpgflags --detach-sign --armor "$releasedir/$project.$goarch"
gpg $gpgflags --detach-sign --armor "$releasedir/$project.tar.xz"
gpg $gpgflags --clear-sign --armor \
--output "$releasedir/$project.$hashcmd"{.tmp,} && \
mv "$releasedir/$project.$hashcmd"{.tmp,}

View file

@ -1,4 +0,0 @@
#!/bin/bash
mount -t tmpfs none /tmp
exec "$@"

View file

@ -1,42 +0,0 @@
#!/bin/bash
source "$(dirname "$BASH_SOURCE")/.validate"
IFS=$'\n'
files=($(validate_diff --diff-filter=ACMR --name-only -- '*.c' | grep -v '^vendor/' || true))
unset IFS
# indent(1): "You must use the -T option to tell indent the name of all the typenames in your program that are defined by typedef."
INDENT="indent -linux -l120 -T size_t -T jmp_buf"
if [ -z "$(indent --version 2>&1 | grep GNU)" ]; then
echo "Skipping C indentation checks, as GNU indent is not installed."
exit 0
fi
badFiles=()
for f in "${files[@]}"; do
orig=$(mktemp)
formatted=$(mktemp)
# we use "git show" here to validate that what's committed is formatted
git show "$VALIDATE_HEAD:$f" > ${orig}
${INDENT} ${orig} -o ${formatted}
if [ "$(diff -u ${orig} ${formatted})" ]; then
badFiles+=("$f")
fi
rm -f ${orig} ${formatted}
done
if [ ${#badFiles[@]} -eq 0 ]; then
echo 'Congratulations! All C source files are properly formatted.'
else
{
echo "These files are not properly formatted:"
for f in "${badFiles[@]}"; do
echo " - $f"
done
echo
echo "Please reformat the above files using \"${INDENT}\" and commit the result."
echo
} >&2
false
fi

View file

@ -1,30 +0,0 @@
#!/bin/bash
source "$(dirname "$BASH_SOURCE")/.validate"
IFS=$'\n'
files=($(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true))
unset IFS
badFiles=()
for f in "${files[@]}"; do
# we use "git show" here to validate that what's committed is formatted
if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then
badFiles+=("$f")
fi
done
if [ ${#badFiles[@]} -eq 0 ]; then
echo 'Congratulations! All Go source files are properly formatted.'
else
{
echo "These files are not properly gofmt'd:"
for f in "${badFiles[@]}"; do
echo " - $f"
done
echo
echo 'Please reformat the above files using "gofmt -s -w" and commit the result.'
echo
} >&2
false
fi

View file

@ -1,83 +0,0 @@
# runc Integration Tests
Integration tests provide end-to-end testing of runc.
Note that integration tests do **not** replace unit tests.
As a rule of thumb, code should be tested thoroughly with unit tests.
Integration tests on the other hand are meant to test a specific feature end
to end.
Integration tests are written in *bash* using the
[bats](https://github.com/sstephenson/bats) framework.
## Running integration tests
The easiest way to run integration tests is with Docker:
```
$ make integration
```
Alternatively, you can run integration tests directly on your host through make:
```
$ sudo make localintegration
```
Or you can just run them directly using bats
```
$ sudo bats tests/integration
```
To run a single test bucket:
```
$ make integration TESTFLAGS="/checkpoint.bats"
```
To run them on your host, you will need to setup a development environment plus
[bats](https://github.com/sstephenson/bats#installing-bats-from-source)
For example:
```
$ cd ~/go/src/github.com
$ git clone https://github.com/sstephenson/bats.git
$ cd bats
$ ./install.sh /usr/local
```
> **Note**: There are known issues running the integration tests using
> **devicemapper** as a storage driver, make sure that your docker daemon
> is using **aufs** if you want to successfully run the integration tests.
## Writing integration tests
[helper functions](https://github.com/opencontainers/runc/blob/master/test/integration/helpers.bash)
are provided in order to facilitate writing tests.
```sh
#!/usr/bin/env bats
# This will load the helpers.
load helpers
# setup is called at the beginning of every test.
function setup() {
# see functions teardown_hello and setup_hello in helpers.bash, used to
# create a pristine environment for running your tests
teardown_hello
setup_hello
}
# teardown is called at the end of every test.
function teardown() {
teardown_hello
}
@test "this is a simple test" {
runc run containerid
# "The runc macro" automatically populates $status, $output and $lines.
# Please refer to bats documentation to find out more.
[ "$status" -eq 0 ]
# check expected output
[[ "${output}" == *"Hello"* ]]
}
```

View file

@ -1,127 +0,0 @@
#!/usr/bin/env bats
load helpers
function teardown() {
rm -f $BATS_TMPDIR/runc-cgroups-integration-test.json
teardown_running_container test_cgroups_kmem
teardown_running_container test_cgroups_permissions
teardown_busybox
}
function setup() {
teardown
setup_busybox
}
function check_cgroup_value() {
cgroup=$1
source=$2
expected=$3
current=$(cat $cgroup/$source)
echo $cgroup/$source
echo "current" $current "!?" "$expected"
[ "$current" -eq "$expected" ]
}
@test "runc update --kernel-memory (initialized)" {
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
requires cgroups_kmem
set_cgroups_path "$BUSYBOX_BUNDLE"
# Set some initial known values
DATA=$(cat <<-EOF
"memory": {
"kernel": 16777216
},
EOF
)
DATA=$(echo ${DATA} | sed 's/\n/\\n/g')
sed -i "s/\(\"resources\": {\)/\1\n${DATA}/" ${BUSYBOX_BUNDLE}/config.json
# run a detached busybox to work with
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_kmem
[ "$status" -eq 0 ]
# update kernel memory limit
runc update test_cgroups_kmem --kernel-memory 50331648
[ "$status" -eq 0 ]
# check the value
check_cgroup_value $CGROUP_MEMORY "memory.kmem.limit_in_bytes" 50331648
}
@test "runc update --kernel-memory (uninitialized)" {
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
requires cgroups_kmem
set_cgroups_path "$BUSYBOX_BUNDLE"
# run a detached busybox to work with
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_kmem
[ "$status" -eq 0 ]
# update kernel memory limit
runc update test_cgroups_kmem --kernel-memory 50331648
# Since kernel 4.6, we can update kernel memory without initialization
# because it's accounted by default.
if [ "$KERNEL_MAJOR" -lt 4 ] || [ "$KERNEL_MAJOR" -eq 4 -a "$KERNEL_MINOR" -le 5 ]; then
[ ! "$status" -eq 0 ]
else
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_MEMORY "memory.kmem.limit_in_bytes" 50331648
fi
}
@test "runc create (no limits + no cgrouppath + no permission) succeeds" {
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_permissions
[ "$status" -eq 0 ]
}
@test "runc create (rootless + no limits + cgrouppath + no permission) fails with permission error" {
requires rootless
requires rootless_no_cgroup
set_cgroups_path "$BUSYBOX_BUNDLE"
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_permissions
[ "$status" -eq 1 ]
[[ ${lines[1]} == *"permission denied"* ]]
}
@test "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error" {
requires rootless
requires rootless_no_cgroup
set_resources_limit "$BUSYBOX_BUNDLE"
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_permissions
[ "$status" -eq 1 ]
[[ ${lines[1]} == *"cannot set limits on the pids cgroup, as the container has not joined it"* ]]
}
@test "runc create (limits + cgrouppath + permission on the cgroup dir) succeeds" {
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
set_cgroups_path "$BUSYBOX_BUNDLE"
set_resources_limit "$BUSYBOX_BUNDLE"
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_permissions
[ "$status" -eq 0 ]
}
@test "runc exec (limits + cgrouppath + permission on the cgroup dir) succeeds" {
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
set_cgroups_path "$BUSYBOX_BUNDLE"
set_resources_limit "$BUSYBOX_BUNDLE"
runc run -d --console-socket $CONSOLE_SOCKET test_cgroups_permissions
[ "$status" -eq 0 ]
runc exec test_cgroups_permissions echo "cgroups_exec"
[ "$status" -eq 0 ]
[[ ${lines[0]} == *"cgroups_exec"* ]]
}

View file

@ -1,232 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "checkpoint and restore" {
# XXX: currently criu require root containers.
requires criu root
# criu does not work with external terminals so..
# setting terminal and root:readonly: to false
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox running
for i in `seq 2`; do
# checkpoint the running container
runc --criu "$CRIU" checkpoint --work-path ./work-dir test_busybox
ret=$?
# if you are having problems getting criu to work uncomment the following dump:
#cat /run/opencontainer/containers/test_busybox/criu.work/dump.log
cat ./work-dir/dump.log | grep -B 5 Error || true
[ "$ret" -eq 0 ]
# after checkpoint busybox is no longer running
runc state test_busybox
[ "$status" -ne 0 ]
# restore from checkpoint
runc --criu "$CRIU" restore -d --work-path ./work-dir --console-socket $CONSOLE_SOCKET test_busybox
ret=$?
cat ./work-dir/restore.log | grep -B 5 Error || true
[ "$ret" -eq 0 ]
# busybox should be back up and running
testcontainer test_busybox running
done
}
@test "checkpoint --pre-dump and restore" {
# XXX: currently criu require root containers.
requires criu root
sed -i 's;"terminal": true;"terminal": false;' config.json
sed -i 's;"readonly": true;"readonly": false;' config.json
sed -i 's/"sh"/"sh","-c","for i in `seq 10`; do read xxx || continue; echo ponG $xxx; done"/' config.json
# The following code creates pipes for stdin and stdout.
# CRIU can't handle fifo-s, so we need all these tricks.
fifo=`mktemp -u /tmp/runc-fifo-XXXXXX`
mkfifo $fifo
# stdout
cat $fifo | cat $fifo &
pid=$!
exec 50</proc/$pid/fd/0
exec 51>/proc/$pid/fd/0
# stdin
cat $fifo | cat $fifo &
pid=$!
exec 60</proc/$pid/fd/0
exec 61>/proc/$pid/fd/0
echo -n > $fifo
unlink $fifo
# run busybox (not detached)
__runc run -d test_busybox <&60 >&51 2>&51
[ $? -eq 0 ]
testcontainer test_busybox running
#test checkpoint pre-dump
mkdir parent-dir
runc --criu "$CRIU" checkpoint --pre-dump --image-path ./parent-dir test_busybox
[ "$status" -eq 0 ]
# busybox should still be running
runc state test_busybox
[ "$status" -eq 0 ]
[[ "${output}" == *"running"* ]]
# checkpoint the running container
mkdir image-dir
mkdir work-dir
runc --criu "$CRIU" checkpoint --parent-path ./parent-dir --work-path ./work-dir --image-path ./image-dir test_busybox
cat ./work-dir/dump.log | grep -B 5 Error || true
[ "$status" -eq 0 ]
# after checkpoint busybox is no longer running
runc state test_busybox
[ "$status" -ne 0 ]
# restore from checkpoint
__runc --criu "$CRIU" restore -d --work-path ./work-dir --image-path ./image-dir test_busybox <&60 >&51 2>&51
ret=$?
cat ./work-dir/restore.log | grep -B 5 Error || true
[ $ret -eq 0 ]
# busybox should be back up and running
testcontainer test_busybox running
runc exec --cwd /bin test_busybox echo ok
[ "$status" -eq 0 ]
[[ ${output} == "ok" ]]
echo Ping >&61
exec 61>&-
exec 51>&-
run cat <&50
[ "$status" -eq 0 ]
[[ "${output}" == *"ponG Ping"* ]]
}
@test "checkpoint --lazy-pages and restore" {
# XXX: currently criu require root containers.
requires criu root
# check if lazy-pages is supported
run ${CRIU} check --feature lazy_pages
if [ "$status" -eq 1 ]; then
# this criu does not support lazy migration; skip the test
skip "this criu does not support lazy migration"
fi
sed -i 's;"terminal": true;"terminal": false;' config.json
sed -i 's;"readonly": true;"readonly": false;' config.json
sed -i 's/"sh"/"sh","-c","for i in `seq 10`; do read xxx || continue; echo ponG $xxx; done"/' config.json
# The following code creates pipes for stdin and stdout.
# CRIU can't handle fifo-s, so we need all these tricks.
fifo=`mktemp -u /tmp/runc-fifo-XXXXXX`
mkfifo $fifo
# For lazy migration we need to know when CRIU is ready to serve
# the memory pages via TCP.
lazy_pipe=`mktemp -u /tmp/lazy-pipe-XXXXXX`
mkfifo $lazy_pipe
# TCP port for lazy migration
port=27277
# stdout
cat $fifo | cat $fifo &
pid=$!
exec 50</proc/$pid/fd/0
exec 51>/proc/$pid/fd/0
# stdin
cat $fifo | cat $fifo &
pid=$!
exec 60</proc/$pid/fd/0
exec 61>/proc/$pid/fd/0
echo -n > $fifo
unlink $fifo
# run busybox
__runc run -d test_busybox <&60 >&51 2>&51
[ $? -eq 0 ]
testcontainer test_busybox running
# checkpoint the running container
mkdir image-dir
mkdir work-dir
# Double fork taken from helpers.bats
# We need to start 'runc checkpoint --lazy-pages' in the background,
# so we double fork in the shell.
(runc --criu "$CRIU" checkpoint --lazy-pages --page-server 0.0.0.0:${port} --status-fd ${lazy_pipe} --work-path ./work-dir --image-path ./image-dir test_busybox & ) &
# Sleeping here. This is ugly, but not sure how else to handle it.
# The return code of the runc process running in the background is needed
# in case there is some basic error. Whether the lazy migration is ready
# is signalled via $lazy_pipe, which will probably always be ready
# after sleeping two seconds.
sleep 2
# Check if inventory.img was written
[ -e image-dir/inventory.img ]
# If the inventory.img exists criu checkpointed some things, let's see
# if there were other errors in the log file.
run grep -B 5 Error ./work-dir/dump.log -q
[ "$status" -eq 1 ]
# This will block until CRIU is ready to serve memory pages
cat $lazy_pipe
[ "$status" -eq 1 ]
unlink $lazy_pipe
# Double fork taken from helpers.bats
# We need to start 'criu lazy-pages' in the background,
# so we double fork in the shell.
# Start CRIU in lazy-daemon mode
$(${CRIU} lazy-pages --page-server --address 127.0.0.1 --port ${port} -D image-dir &) &
# Restore lazily from checkpoint.
# The restored container needs a different name as the checkpointed
# container is not yet destroyed. It is only destroyed at that point
# in time when the last page is lazily transferred to the destination.
# Killing the CRIU on the checkpoint side will let the container
# continue to run if the migration failed at some point.
__runc --criu "$CRIU" restore -d --work-path ./image-dir --image-path ./image-dir --lazy-pages test_busybox_restore <&60 >&51 2>&51
ret=$?
[ $ret -eq 0 ]
run grep -B 5 Error ./work-dir/dump.log -q
[ "$status" -eq 1 ]
# busybox should be back up and running
testcontainer test_busybox_restore running
runc exec --cwd /bin test_busybox_restore echo ok
[ "$status" -eq 0 ]
[[ ${output} == "ok" ]]
echo Ping >&61
exec 61>&-
exec 51>&-
run cat <&50
[ "$status" -eq 0 ]
[[ "${output}" == *"ponG Ping"* ]]
}

View file

@ -1,89 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "runc create" {
runc create --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox created
# start the command
runc start test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox running
}
@test "runc create exec" {
runc create --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox created
runc exec test_busybox true
[ "$status" -eq 0 ]
testcontainer test_busybox created
# start the command
runc start test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox running
}
@test "runc create --pid-file" {
runc create --pid-file pid.txt --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox created
# check pid.txt was generated
[ -e pid.txt ]
run cat pid.txt
[ "$status" -eq 0 ]
[[ ${lines[0]} == $(__runc state test_busybox | jq '.pid') ]]
# start the command
runc start test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox running
}
@test "runc create --pid-file with new CWD" {
# create pid_file directory as the CWD
run mkdir pid_file
[ "$status" -eq 0 ]
run cd pid_file
[ "$status" -eq 0 ]
runc create --pid-file pid.txt -b $BUSYBOX_BUNDLE --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox created
# check pid.txt was generated
[ -e pid.txt ]
run cat pid.txt
[ "$status" -eq 0 ]
[[ ${lines[0]} == $(__runc state test_busybox | jq '.pid') ]]
# start the command
runc start test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox running
}

View file

@ -1,70 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_hello
setup_hello
}
function teardown() {
teardown_hello
}
@test "global --debug" {
# run hello-world
runc --debug run test_hello
echo "${output}"
[ "$status" -eq 0 ]
}
@test "global --debug to --log" {
# run hello-world
runc --log log.out --debug run test_hello
[ "$status" -eq 0 ]
# check output does not include debug info
[[ "${output}" != *"level=debug"* ]]
# check log.out was generated
[ -e log.out ]
# check expected debug output was sent to log.out
run cat log.out
[ "$status" -eq 0 ]
[[ "${output}" == *"level=debug"* ]]
}
@test "global --debug to --log --log-format 'text'" {
# run hello-world
runc --log log.out --log-format "text" --debug run test_hello
[ "$status" -eq 0 ]
# check output does not include debug info
[[ "${output}" != *"level=debug"* ]]
# check log.out was generated
[ -e log.out ]
# check expected debug output was sent to log.out
run cat log.out
[ "$status" -eq 0 ]
[[ "${output}" == *"level=debug"* ]]
}
@test "global --debug to --log --log-format 'json'" {
# run hello-world
runc --log log.out --log-format "json" --debug run test_hello
[ "$status" -eq 0 ]
# check output does not include debug info
[[ "${output}" != *"level=debug"* ]]
# check log.out was generated
[ -e log.out ]
# check expected debug output was sent to log.out
run cat log.out
[ "$status" -eq 0 ]
[[ "${output}" == *'"level":"debug"'* ]]
}

View file

@ -1,53 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "runc delete" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
runc kill test_busybox KILL
[ "$status" -eq 0 ]
# wait for busybox to be in the destroyed state
retry 10 1 eval "__runc state test_busybox | grep -q 'stopped'"
# delete test_busybox
runc delete test_busybox
[ "$status" -eq 0 ]
runc state test_busybox
[ "$status" -ne 0 ]
}
@test "runc delete --force" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
# force delete test_busybox
runc delete --force test_busybox
runc state test_busybox
[ "$status" -ne 0 ]
}
@test "runc delete --force ignore not exist" {
runc delete --force notexists
[ "$status" -eq 0 ]
}

View file

@ -1,109 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "events --stats" {
# XXX: currently cgroups require root containers.
requires root
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# generate stats
runc events --stats test_busybox
[ "$status" -eq 0 ]
[[ "${lines[0]}" == [\{]"\"type\""[:]"\"stats\""[,]"\"id\""[:]"\"test_busybox\""[,]* ]]
[[ "${lines[0]}" == *"data"* ]]
}
@test "events --interval default " {
# XXX: currently cgroups require root containers.
requires root
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# spawn two sub processes (shells)
# the first sub process is an event logger that sends stats events to events.log
# the second sub process waits for an event that includes test_busybox, then
# kills the test_busybox container which causes the event logger to exit
(__runc events test_busybox > events.log) &
(
retry 10 1 eval "grep -q 'test_busybox' events.log"
teardown_running_container test_busybox
) &
wait # wait for the above sub shells to finish
[ -e events.log ]
run cat events.log
[ "$status" -eq 0 ]
[[ "${lines[0]}" == [\{]"\"type\""[:]"\"stats\""[,]"\"id\""[:]"\"test_busybox\""[,]* ]]
[[ "${lines[0]}" == *"data"* ]]
}
@test "events --interval 1s " {
# XXX: currently cgroups require root containers.
requires root
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# spawn two sub processes (shells)
# the first sub process is an event logger that sends stats events to events.log once a second
# the second sub process tries 3 times for an event that includes test_busybox,
# pausing 1s between each attempt, then kills the test_busybox container which
# causes the event logger to exit
(__runc events --interval 1s test_busybox > events.log) &
(
retry 3 1 eval "grep -q 'test_busybox' events.log"
teardown_running_container test_busybox
) &
wait # wait for the above sub shells to finish
[ -e events.log ]
run eval "grep -q 'test_busybox' events.log"
[ "$status" -eq 0 ]
}
@test "events --interval 100ms " {
# XXX: currently cgroups require root containers.
requires root
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
#prove there is no carry over of events.log from a prior test
[ ! -e events.log ]
# spawn two sub processes (shells)
# the first sub process is an event logger that sends stats events to events.log once every 100ms
# the second sub process tries 3 times for an event that includes test_busybox,
# pausing 100ms between each attempt, then kills the test_busybox container which
# causes the event logger to exit
(__runc events --interval 100ms test_busybox > events.log) &
(
retry 3 0.100 eval "grep -q 'test_busybox' events.log"
teardown_running_container test_busybox
) &
wait # wait for the above sub shells to finish
[ -e events.log ]
run eval "grep -q 'test_busybox' events.log"
[ "$status" -eq 0 ]
}

View file

@ -1,129 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "runc exec" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
runc exec test_busybox echo Hello from exec
[ "$status" -eq 0 ]
echo text echoed = "'""${output}""'"
[[ "${output}" == *"Hello from exec"* ]]
}
@test "runc exec --pid-file" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
runc exec --pid-file pid.txt test_busybox echo Hello from exec
[ "$status" -eq 0 ]
echo text echoed = "'""${output}""'"
[[ "${output}" == *"Hello from exec"* ]]
# check pid.txt was generated
[ -e pid.txt ]
run cat pid.txt
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ [0-9]+ ]]
[[ ${lines[0]} != $(__runc state test_busybox | jq '.pid') ]]
}
@test "runc exec --pid-file with new CWD" {
# create pid_file directory as the CWD
run mkdir pid_file
[ "$status" -eq 0 ]
run cd pid_file
[ "$status" -eq 0 ]
# run busybox detached
runc run -d -b $BUSYBOX_BUNDLE --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
runc exec --pid-file pid.txt test_busybox echo Hello from exec
[ "$status" -eq 0 ]
echo text echoed = "'""${output}""'"
[[ "${output}" == *"Hello from exec"* ]]
# check pid.txt was generated
[ -e pid.txt ]
run cat pid.txt
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ [0-9]+ ]]
[[ ${lines[0]} != $(__runc state test_busybox | jq '.pid') ]]
}
@test "runc exec ls -la" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
runc exec test_busybox ls -la
[ "$status" -eq 0 ]
[[ ${lines[0]} == *"total"* ]]
[[ ${lines[1]} == *"."* ]]
[[ ${lines[2]} == *".."* ]]
}
@test "runc exec ls -la with --cwd" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
runc exec --cwd /bin test_busybox pwd
[ "$status" -eq 0 ]
[[ ${output} == "/bin"* ]]
}
@test "runc exec --env" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
runc exec --env RUNC_EXEC_TEST=true test_busybox env
[ "$status" -eq 0 ]
[[ ${output} == *"RUNC_EXEC_TEST=true"* ]]
}
@test "runc exec --user" {
# --user can't work in rootless containers that don't have idmap.
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_idmap
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
runc exec --user 1000:1000 test_busybox id
[ "$status" -eq 0 ]
[[ "${output}" == "uid=1000 gid=1000"* ]]
}
@test "runc exec --additional-gids" {
requires root
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
wait_for_container 15 1 test_busybox
runc exec --user 1000:1000 --additional-gids 100 --additional-gids 99 test_busybox id
[ "$status" -eq 0 ]
[[ ${output} == "uid=1000 gid=1000 groups=99(nogroup),100(users)" ]]
}

View file

@ -1,87 +0,0 @@
#!/usr/bin/env bats
load helpers
@test "runc -h" {
runc -h
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ NAME:+ ]]
[[ ${lines[1]} =~ runc\ '-'\ Open\ Container\ Initiative\ runtime+ ]]
runc --help
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ NAME:+ ]]
[[ ${lines[1]} =~ runc\ '-'\ Open\ Container\ Initiative\ runtime+ ]]
}
@test "runc command -h" {
runc checkpoint -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ checkpoint+ ]]
runc delete -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ delete+ ]]
runc events -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ events+ ]]
runc exec -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ exec+ ]]
runc kill -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ kill+ ]]
runc list -h
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ NAME:+ ]]
[[ ${lines[1]} =~ runc\ list+ ]]
runc list --help
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ NAME:+ ]]
[[ ${lines[1]} =~ runc\ list+ ]]
runc pause -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ pause+ ]]
runc restore -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ restore+ ]]
runc resume -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ resume+ ]]
# We don't use runc_spec here, because we're just testing the help page.
runc spec -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ spec+ ]]
runc start -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ start+ ]]
runc run -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ run+ ]]
runc state -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ state+ ]]
runc update -h
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ runc\ update+ ]]
}
@test "runc foo -h" {
runc foo -h
[ "$status" -ne 0 ]
[[ "${output}" == *"No help topic for 'foo'"* ]]
}

View file

@ -1,330 +0,0 @@
#!/bin/bash
# Root directory of integration tests.
INTEGRATION_ROOT=$(dirname "$(readlink -f "$BASH_SOURCE")")
. ${INTEGRATION_ROOT}/multi-arch.bash
RUNC="${INTEGRATION_ROOT}/../../runc"
RECVTTY="${INTEGRATION_ROOT}/../../contrib/cmd/recvtty/recvtty"
GOPATH="$(mktemp -d --tmpdir runc-integration-gopath.XXXXXX)"
# Test data path.
TESTDATA="${INTEGRATION_ROOT}/testdata"
# Busybox image
BUSYBOX_IMAGE="$BATS_TMPDIR/busybox.tar"
BUSYBOX_BUNDLE="$BATS_TMPDIR/busyboxtest"
# hello-world in tar format
HELLO_FILE=`get_hello`
HELLO_IMAGE="$TESTDATA/$HELLO_FILE"
HELLO_BUNDLE="$BATS_TMPDIR/hello-world"
# CRIU PATH
CRIU="$(which criu || true)"
# Kernel version
KERNEL_VERSION="$(uname -r)"
KERNEL_MAJOR="${KERNEL_VERSION%%.*}"
KERNEL_MINOR="${KERNEL_VERSION#$KERNEL_MAJOR.}"
KERNEL_MINOR="${KERNEL_MINOR%%.*}"
# Root state path.
ROOT=$(mktemp -d "$BATS_TMPDIR/runc.XXXXXX")
# Path to console socket.
CONSOLE_SOCKET="$BATS_TMPDIR/console.sock"
# Cgroup paths
CGROUP_MEMORY_BASE_PATH=$(grep "cgroup" /proc/self/mountinfo | gawk 'toupper($NF) ~ /\<MEMORY\>/ { print $5; exit }')
CGROUP_CPU_BASE_PATH=$(grep "cgroup" /proc/self/mountinfo | gawk 'toupper($NF) ~ /\<CPU\>/ { print $5; exit }')
CGROUPS_PATH="/runc-cgroups-integration-test/test-cgroup"
CGROUP_MEMORY="${CGROUP_MEMORY_BASE_PATH}${CGROUPS_PATH}"
# CONFIG_MEMCG_KMEM support
KMEM="${CGROUP_MEMORY_BASE_PATH}/memory.kmem.limit_in_bytes"
RT_PERIOD="${CGROUP_CPU_BASE_PATH}/cpu.rt_period_us"
# Check if we're in rootless mode.
ROOTLESS=$(id -u)
# Wrapper for runc.
function runc() {
run __runc "$@"
# Some debug information to make life easier. bats will only print it if the
# test failed, in which case the output is useful.
echo "runc $@ (status=$status):" >&2
echo "$output" >&2
}
# Raw wrapper for runc.
function __runc() {
"$RUNC" --log /proc/self/fd/2 --root "$ROOT" "$@"
}
# Wrapper for runc spec, which takes only one argument (the bundle path).
function runc_spec() {
! [[ "$#" > 1 ]]
local args=()
local bundle=""
if [ "$ROOTLESS" -ne 0 ]; then
args+=("--rootless")
fi
if [ "$#" -ne 0 ]; then
bundle="$1"
args+=("--bundle" "$bundle")
fi
runc spec "${args[@]}"
# Always add additional mappings if we have idmaps.
if [[ "$ROOTLESS" -ne 0 ]] && [[ "$ROOTLESS_FEATURES" == *"idmap"* ]]; then
runc_rootless_idmap "$bundle"
fi
# Ensure config.json contains linux.resources
if [[ "$ROOTLESS" -ne 0 ]] && [[ "$ROOTLESS_FEATURES" == *"cgroup"* ]]; then
runc_rootless_cgroup "$bundle"
fi
}
# Shortcut to add additional uids and gids, based on the values set as part of
# a rootless configuration.
function runc_rootless_idmap() {
bundle="${1:-.}"
cat "$bundle/config.json" \
| jq '.mounts |= map((select(.type == "devpts") | .options += ["gid=5"]) // .)' \
| jq '.linux.uidMappings |= .+ [{"hostID": '"$ROOTLESS_UIDMAP_START"', "containerID": 1000, "size": '"$ROOTLESS_UIDMAP_LENGTH"'}]' \
| jq '.linux.gidMappings |= .+ [{"hostID": '"$ROOTLESS_GIDMAP_START"', "containerID": 100, "size": 1}]' \
| jq '.linux.gidMappings |= .+ [{"hostID": '"$(($ROOTLESS_GIDMAP_START+10))"', "containerID": 1, "size": 20}]' \
| jq '.linux.gidMappings |= .+ [{"hostID": '"$(($ROOTLESS_GIDMAP_START+100))"', "containerID": 1000, "size": '"$(($ROOTLESS_GIDMAP_LENGTH-1000))"'}]' \
>"$bundle/config.json.tmp"
mv "$bundle/config.json"{.tmp,}
}
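# Sketch (not part of the original helpers) of what runc_rootless_idmap appends
# to config.json, assuming the defaults exported by rootless.sh
# (ROOTLESS_UIDMAP_START=100000, ROOTLESS_GIDMAP_START=200000, lengths 65536):
#   "uidMappings": [..., {"hostID": 100000, "containerID": 1000, "size": 65536}]
#   "gidMappings": [..., {"hostID": 200000, "containerID": 100,  "size": 1},
#                        {"hostID": 200010, "containerID": 1,    "size": 20},
#                        {"hostID": 200100, "containerID": 1000, "size": 64536}]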
# Shortcut to add empty resources as part of a rootless configuration.
function runc_rootless_cgroup() {
bundle="${1:-.}"
cat "$bundle/config.json" \
| jq '.linux.resources |= .+ {"memory":{},"cpu":{},"blockio":{},"pids":{}}' \
>"$bundle/config.json.tmp"
mv "$bundle/config.json"{.tmp,}
}
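# The net effect is just an empty resources stanza in config.json, roughly:
#   "linux": { "resources": { "memory": {}, "cpu": {}, "blockio": {}, "pids": {} } }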
# Helper function to set cgroupsPath to the value of $CGROUPS_PATH
function set_cgroups_path() {
bundle="${1:-.}"
sed -i 's/\("linux": {\)/\1\n "cgroupsPath": "\/runc-cgroups-integration-test\/test-cgroup",/' "$bundle/config.json"
}
# Helper function to set a resources limit
function set_resources_limit() {
bundle="${1:-.}"
sed -i 's/\("linux": {\)/\1\n "resources": { "pids": { "limit": 100 } },/' "$bundle/config.json"
}
# Fails the current test, providing the error given.
function fail() {
echo "$@" >&2
exit 1
}
# Allows a test to specify what things it requires. If the environment can't
# support it, the test is skipped with a message.
function requires() {
for var in "$@"; do
case $var in
criu)
if [ ! -e "$CRIU" ]; then
skip "test requires ${var}"
fi
;;
root)
if [ "$ROOTLESS" -ne 0 ]; then
skip "test requires ${var}"
fi
;;
rootless)
if [ "$ROOTLESS" -eq 0 ]; then
skip "test requires ${var}"
fi
;;
rootless_idmap)
if [[ "$ROOTLESS_FEATURES" != *"idmap"* ]]; then
skip "test requires ${var}"
fi
;;
rootless_cgroup)
if [[ "$ROOTLESS_FEATURES" != *"cgroup"* ]]; then
skip "test requires ${var}"
fi
;;
rootless_no_cgroup)
if [[ "$ROOTLESS_FEATURES" == *"cgroup"* ]]; then
skip "test requires ${var}"
fi
;;
cgroups_kmem)
if [ ! -e "$KMEM" ]; then
skip "Test requires ${var}"
fi
;;
cgroups_rt)
if [ ! -e "$RT_PERIOD" ]; then
skip "Test requires ${var}"
fi
;;
*)
fail "BUG: Invalid requires ${var}."
;;
esac
done
}
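# Typical usage (illustrative only, not an actual test in this suite): declare
# requirements at the top of a test so it is skipped rather than failed on
# hosts that cannot support it, e.g.:
#   @test "something needing real cgroups" {
#     requires root cgroups_kmem
#     ...
#   }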
# Retry a command $1 times until it succeeds. Wait $2 seconds between retries.
function retry() {
local attempts=$1
shift
local delay=$1
shift
local i
for ((i = 0; i < attempts; i++)); do
run "$@"
if [[ "$status" -eq 0 ]]; then
return 0
fi
sleep $delay
done
echo "Command \"$@\" failed $attempts times. Output: $output"
false
}
# retry until the given container has state
function wait_for_container() {
local attempts=$1
local delay=$2
local cid=$3
local i
for ((i = 0; i < attempts; i++)); do
runc state $cid
if [[ "$status" -eq 0 ]]; then
return 0
fi
sleep $delay
done
echo "runc state failed to return state $statecheck $attempts times. Output: $output"
false
}
# retry until the given container has state
function wait_for_container_inroot() {
local attempts=$1
local delay=$2
local cid=$3
local i
for ((i = 0; i < attempts; i++)); do
ROOT=$4 runc state $cid
if [[ "$status" -eq 0 ]]; then
return 0
fi
sleep $delay
done
echo "runc state failed to return state $statecheck $attempts times. Output: $output"
false
}
function testcontainer() {
# test state of container
runc state $1
[ "$status" -eq 0 ]
[[ "${output}" == *"$2"* ]]
}
function setup_recvtty() {
# We need to start recvtty in the background, so we double fork in the shell.
("$RECVTTY" --pid-file "$BATS_TMPDIR/recvtty.pid" --mode null "$CONSOLE_SOCKET" &) &
}
function teardown_recvtty() {
# When we kill recvtty, the container will also be killed.
if [ -f "$BATS_TMPDIR/recvtty.pid" ]; then
kill -9 $(cat "$BATS_TMPDIR/recvtty.pid")
fi
# Clean up the files that might be left over.
rm -f "$BATS_TMPDIR/recvtty.pid"
rm -f "$CONSOLE_SOCKET"
}
function setup_busybox() {
setup_recvtty
run mkdir "$BUSYBOX_BUNDLE"
run mkdir "$BUSYBOX_BUNDLE"/rootfs
if [ -e "/testdata/busybox.tar" ]; then
BUSYBOX_IMAGE="/testdata/busybox.tar"
fi
if [ ! -e $BUSYBOX_IMAGE ]; then
curl -o $BUSYBOX_IMAGE -sSL `get_busybox`
fi
tar --exclude './dev/*' -C "$BUSYBOX_BUNDLE"/rootfs -xf "$BUSYBOX_IMAGE"
cd "$BUSYBOX_BUNDLE"
runc_spec
}
function setup_hello() {
setup_recvtty
run mkdir "$HELLO_BUNDLE"
run mkdir "$HELLO_BUNDLE"/rootfs
tar --exclude './dev/*' -C "$HELLO_BUNDLE"/rootfs -xf "$HELLO_IMAGE"
cd "$HELLO_BUNDLE"
runc_spec
sed -i 's;"sh";"/hello";' config.json
}
function teardown_running_container() {
runc list
# $1 should be a container name such as "test_busybox"
# here we match "test_busybox " (note the trailing space) to avoid matching
# containers whose names merely share the prefix,
# e.g. "test_busybox" and "test_busybox_update"
if [[ "${output}" == *"$1 "* ]]; then
runc kill $1 KILL
retry 10 1 eval "__runc state '$1' | grep -q 'stopped'"
runc delete $1
fi
}
function teardown_running_container_inroot() {
ROOT=$2 runc list
# $1 should be a container name such as "test_busybox"
# here we match "test_busybox " (note the trailing space) to avoid matching
# containers whose names merely share the prefix,
# e.g. "test_busybox" and "test_busybox_update"
if [[ "${output}" == *"$1 "* ]]; then
ROOT=$2 runc kill $1 KILL
retry 10 1 eval "ROOT='$2' __runc state '$1' | grep -q 'stopped'"
ROOT=$2 runc delete $1
fi
}
function teardown_busybox() {
cd "$INTEGRATION_ROOT"
teardown_recvtty
teardown_running_container test_busybox
run rm -f -r "$BUSYBOX_BUNDLE"
}
function teardown_hello() {
cd "$INTEGRATION_ROOT"
teardown_recvtty
teardown_running_container test_hello
run rm -f -r "$HELLO_BUNDLE"
}

View file

@ -1,30 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "kill detached busybox" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
runc kill test_busybox KILL
[ "$status" -eq 0 ]
retry 10 1 eval "__runc state test_busybox | grep -q 'stopped'"
runc delete test_busybox
[ "$status" -eq 0 ]
}

View file

@ -1,56 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_running_container_inroot test_box1 $HELLO_BUNDLE
teardown_running_container_inroot test_box2 $HELLO_BUNDLE
teardown_running_container_inroot test_box3 $HELLO_BUNDLE
teardown_busybox
setup_busybox
}
function teardown() {
teardown_running_container_inroot test_box1 $HELLO_BUNDLE
teardown_running_container_inroot test_box2 $HELLO_BUNDLE
teardown_running_container_inroot test_box3 $HELLO_BUNDLE
teardown_busybox
}
@test "list" {
# run a few busyboxes detached
ROOT=$HELLO_BUNDLE runc run -d --console-socket $CONSOLE_SOCKET test_box1
[ "$status" -eq 0 ]
ROOT=$HELLO_BUNDLE runc run -d --console-socket $CONSOLE_SOCKET test_box2
[ "$status" -eq 0 ]
ROOT=$HELLO_BUNDLE runc run -d --console-socket $CONSOLE_SOCKET test_box3
[ "$status" -eq 0 ]
ROOT=$HELLO_BUNDLE runc list
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ ID\ +PID\ +STATUS\ +BUNDLE\ +CREATED+ ]]
[[ "${lines[1]}" == *"test_box1"*[0-9]*"running"*$BUSYBOX_BUNDLE*[0-9]* ]]
[[ "${lines[2]}" == *"test_box2"*[0-9]*"running"*$BUSYBOX_BUNDLE*[0-9]* ]]
[[ "${lines[3]}" == *"test_box3"*[0-9]*"running"*$BUSYBOX_BUNDLE*[0-9]* ]]
ROOT=$HELLO_BUNDLE runc list -q
[ "$status" -eq 0 ]
[[ "${lines[0]}" == "test_box1" ]]
[[ "${lines[1]}" == "test_box2" ]]
[[ "${lines[2]}" == "test_box3" ]]
ROOT=$HELLO_BUNDLE runc list --format table
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ ID\ +PID\ +STATUS\ +BUNDLE\ +CREATED+ ]]
[[ "${lines[1]}" == *"test_box1"*[0-9]*"running"*$BUSYBOX_BUNDLE*[0-9]* ]]
[[ "${lines[2]}" == *"test_box2"*[0-9]*"running"*$BUSYBOX_BUNDLE*[0-9]* ]]
[[ "${lines[3]}" == *"test_box3"*[0-9]*"running"*$BUSYBOX_BUNDLE*[0-9]* ]]
ROOT=$HELLO_BUNDLE runc list --format json
[ "$status" -eq 0 ]
[[ "${lines[0]}" == [\[][\{]"\"ociVersion\""[:]"\""*[0-9][\.]*[0-9][\.]*[0-9]*"\""[,]"\"id\""[:]"\"test_box1\""[,]"\"pid\""[:]*[0-9][,]"\"status\""[:]*"\"running\""[,]"\"bundle\""[:]*$BUSYBOX_BUNDLE*[,]"\"rootfs\""[:]"\""*"\""[,]"\"created\""[:]*[0-9]*[\}]* ]]
[[ "${lines[0]}" == *[,][\{]"\"ociVersion\""[:]"\""*[0-9][\.]*[0-9][\.]*[0-9]*"\""[,]"\"id\""[:]"\"test_box2\""[,]"\"pid\""[:]*[0-9][,]"\"status\""[:]*"\"running\""[,]"\"bundle\""[:]*$BUSYBOX_BUNDLE*[,]"\"rootfs\""[:]"\""*"\""[,]"\"created\""[:]*[0-9]*[\}]* ]]
[[ "${lines[0]}" == *[,][\{]"\"ociVersion\""[:]"\""*[0-9][\.]*[0-9][\.]*[0-9]*"\""[,]"\"id\""[:]"\"test_box3\""[,]"\"pid\""[:]*[0-9][,]"\"status\""[:]*"\"running\""[,]"\"bundle\""[:]*$BUSYBOX_BUNDLE*[,]"\"rootfs\""[:]"\""*"\""[,]"\"created\""[:]*[0-9]*[\}][\]] ]]
}

View file

@ -1,59 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
# Create fake rootfs.
mkdir rootfs/testdir
echo "Forbidden information!" > rootfs/testfile
# add extra masked paths
sed -i 's;"maskedPaths": \[;"maskedPaths": \["/testdir","/testfile",;g' config.json
}
function teardown() {
teardown_busybox
}
@test "mask paths [file]" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
runc exec test_busybox cat /testfile
[ "$status" -eq 0 ]
[[ "${output}" == "" ]]
runc exec test_busybox rm -f /testfile
[ "$status" -eq 1 ]
[[ "${output}" == *"Read-only file system"* ]]
runc exec test_busybox umount /testfile
[ "$status" -eq 1 ]
[[ "${output}" == *"Operation not permitted"* ]]
}
@test "mask paths [directory]" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
runc exec test_busybox ls /testdir
[ "$status" -eq 0 ]
[[ "${output}" == "" ]]
runc exec test_busybox touch /testdir/foo
[ "$status" -eq 1 ]
[[ "${output}" == *"Read-only file system"* ]]
runc exec test_busybox rm -rf /testdir
[ "$status" -eq 1 ]
[[ "${output}" == *"Read-only file system"* ]]
runc exec test_busybox umount /testdir
[ "$status" -eq 1 ]
[[ "${output}" == *"Operation not permitted"* ]]
}

View file

@ -1,22 +0,0 @@
#!/bin/bash
get_busybox(){
case $(go env GOARCH) in
arm64)
echo 'https://github.com/docker-library/busybox/raw/23fbd9c43e0f4bec7605091bfba23db278c367ac/glibc/busybox.tar.xz'
;;
*)
echo 'https://github.com/docker-library/busybox/raw/a0558a9006ce0dd6f6ec5d56cfd3f32ebeeb815f/glibc/busybox.tar.xz'
;;
esac
}
get_hello(){
case $(go env GOARCH) in
arm64)
echo 'hello-world-aarch64.tar'
;;
*)
echo 'hello-world.tar'
;;
esac
}

View file

@ -1,72 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "runc pause and resume" {
# XXX: currently cgroups require root containers.
requires root
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox running
# pause busybox
runc pause test_busybox
[ "$status" -eq 0 ]
# test state of busybox is paused
testcontainer test_busybox paused
# resume busybox
runc resume test_busybox
[ "$status" -eq 0 ]
# test state of busybox is back to running
testcontainer test_busybox running
}
@test "runc pause and resume with nonexist container" {
# XXX: currently cgroups require root containers.
requires root
# run test_busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox running
# pause test_busybox and nonexistent container
runc pause test_busybox
[ "$status" -eq 0 ]
runc pause nonexistent
[ "$status" -ne 0 ]
# test state of test_busybox is paused
testcontainer test_busybox paused
# resume test_busybox and nonexistent container
runc resume test_busybox
[ "$status" -eq 0 ]
runc resume nonexistent
[ "$status" -ne 0 ]
# test state of test_busybox is back to running
testcontainer test_busybox running
# delete test_busybox
runc delete --force test_busybox
runc state test_busybox
[ "$status" -ne 0 ]
}

View file

@ -1,62 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "ps" {
# runc ps requires cgroups, so it is not supported for rootless containers
requires root
# start busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
runc ps test_busybox
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ UID\ +PID\ +PPID\ +C\ +STIME\ +TTY\ +TIME\ +CMD+ ]]
[[ "${lines[1]}" == *"$(id -un 2>/dev/null)"*[0-9]* ]]
}
@test "ps -f json" {
# runc ps requires cgroups, so it is not supported for rootless containers
requires root
# start busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
runc ps -f json test_busybox
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ [0-9]+ ]]
}
@test "ps -e -x" {
# runc ps requires cgroups, so it is not supported for rootless containers
requires root
# start busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
runc ps test_busybox -e -x
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ \ +PID\ +TTY\ +STAT\ +TIME\ +COMMAND+ ]]
[[ "${lines[1]}" =~ [0-9]+ ]]
}

View file

@ -1,50 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_running_container_inroot test_dotbox $HELLO_BUNDLE
teardown_busybox
setup_busybox
}
function teardown() {
teardown_running_container_inroot test_dotbox $HELLO_BUNDLE
teardown_busybox
}
@test "global --root" {
# run busybox detached using $HELLO_BUNDLE for state
ROOT=$HELLO_BUNDLE runc run -d --console-socket $CONSOLE_SOCKET test_dotbox
[ "$status" -eq 0 ]
# run busybox detached in default root
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
runc state test_busybox
[ "$status" -eq 0 ]
[[ "${output}" == *"running"* ]]
ROOT=$HELLO_BUNDLE runc state test_dotbox
[ "$status" -eq 0 ]
[[ "${output}" == *"running"* ]]
ROOT=$HELLO_BUNDLE runc state test_busybox
[ "$status" -ne 0 ]
runc state test_dotbox
[ "$status" -ne 0 ]
runc kill test_busybox KILL
[ "$status" -eq 0 ]
retry 10 1 eval "__runc state test_busybox | grep -q 'stopped'"
runc delete test_busybox
[ "$status" -eq 0 ]
ROOT=$HELLO_BUNDLE runc kill test_dotbox KILL
[ "$status" -eq 0 ]
retry 10 1 eval "ROOT='$HELLO_BUNDLE' __runc state test_dotbox | grep -q 'stopped'"
ROOT=$HELLO_BUNDLE runc delete test_dotbox
[ "$status" -eq 0 ]
}

View file

@ -1,96 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
# initial cleanup in case a prior test exited and did not clean up
cd "$INTEGRATION_ROOT"
run rm -f -r "$HELLO_BUNDLE"
# setup hello-world for spec generation testing
run mkdir "$HELLO_BUNDLE"
run mkdir "$HELLO_BUNDLE"/rootfs
run tar -C "$HELLO_BUNDLE"/rootfs -xf "$HELLO_IMAGE"
}
function teardown() {
cd "$INTEGRATION_ROOT"
run rm -f -r "$HELLO_BUNDLE"
}
@test "spec generation cwd" {
cd "$HELLO_BUNDLE"
# note this test runs from the bundle not the integration root
# test that config.json does not exist after the above partial setup
[ ! -e config.json ]
# test generation of spec does not return an error
runc_spec
[ "$status" -eq 0 ]
# test generation of spec created our config.json (spec)
[ -e config.json ]
# test existence of required args parameter in the generated config.json
run bash -c "grep -A2 'args' config.json | grep 'sh'"
[[ "${output}" == *"sh"* ]]
# change the default args parameter from sh to hello
sed -i 's;"sh";"/hello";' config.json
# ensure the generated spec works by running hello-world
runc run test_hello
[ "$status" -eq 0 ]
}
@test "spec generation --bundle" {
# note this test runs from the integration root not the bundle
# test that config.json does not exist after the above partial setup
[ ! -e "$HELLO_BUNDLE"/config.json ]
# test generation of spec does not return an error
runc_spec "$HELLO_BUNDLE"
[ "$status" -eq 0 ]
# test generation of spec created our config.json (spec)
[ -e "$HELLO_BUNDLE"/config.json ]
# change the default args parameter from sh to hello
sed -i 's;"sh";"/hello";' "$HELLO_BUNDLE"/config.json
# ensure the generated spec works by running hello-world
runc run --bundle "$HELLO_BUNDLE" test_hello
[ "$status" -eq 0 ]
}
@test "spec validator" {
TESTDIR=$(pwd)
cd "$HELLO_BUNDLE"
run git clone https://github.com/opencontainers/runtime-spec.git src/runtime-spec
[ "$status" -eq 0 ]
SPEC_COMMIT=$(grep '^github.com/opencontainers/runtime-spec' ${TESTDIR}/../../vendor.conf | cut -d ' ' -f 2)
run git -C src/runtime-spec reset --hard "${SPEC_COMMIT}"
[ "$status" -eq 0 ]
[ -e src/runtime-spec/schema/config-schema.json ]
run bash -c "GOPATH='$GOPATH' go get github.com/xeipuuv/gojsonschema"
[ "$status" -eq 0 ]
run git -C "${GOPATH}/src/github.com/xeipuuv/gojsonschema" reset --hard 6637feb73ee44cd4640bb3def285c29774234c7f
[ "$status" -eq 0 ]
GOPATH="$GOPATH" go build src/runtime-spec/schema/validate.go
[ -e ./validate ]
runc spec
[ -e config.json ]
run ./validate src/runtime-spec/schema/config-schema.json config.json
[ "$status" -eq 0 ]
[[ "${lines[0]}" == *"The document is valid"* ]]
}

View file

@ -1,31 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "runc start" {
runc create --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox created
# start container test_busybox
runc start test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox running
# delete test_busybox
runc delete --force test_busybox
runc state test_busybox
[ "$status" -ne 0 ]
}

View file

@ -1,76 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "runc run detached" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
}
@test "runc run detached ({u,g}id != 0)" {
# cannot start containers as another user in rootless setup without idmap
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_idmap
# replace "uid": 0 with "uid": 1000
# and do a similar thing for gid.
sed -i 's;"uid": 0;"uid": 1000;g' config.json
sed -i 's;"gid": 0;"gid": 100;g' config.json
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
}
@test "runc run detached --pid-file" {
# run busybox detached
runc run --pid-file pid.txt -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
# check pid.txt was generated
[ -e pid.txt ]
run cat pid.txt
[ "$status" -eq 0 ]
[[ ${lines[0]} == $(__runc state test_busybox | jq '.pid') ]]
}
@test "runc run detached --pid-file with new CWD" {
# create pid_file directory as the CWD
run mkdir pid_file
[ "$status" -eq 0 ]
run cd pid_file
[ "$status" -eq 0 ]
# run busybox detached
runc run --pid-file pid.txt -d -b $BUSYBOX_BUNDLE --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
# check pid.txt was generated
[ -e pid.txt ]
run cat pid.txt
[ "$status" -eq 0 ]
[[ ${lines[0]} == $(__runc state test_busybox | jq '.pid') ]]
}

View file

@ -1,64 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_hello
setup_hello
}
function teardown() {
teardown_hello
}
@test "runc run" {
# run hello-world
runc run test_hello
[ "$status" -eq 0 ]
# check expected output
[[ "${output}" == *"Hello"* ]]
}
@test "runc run ({u,g}id != 0)" {
# cannot start containers as another user in rootless setup without idmap
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_idmap
# replace "uid": 0 with "uid": 1000
# and do a similar thing for gid.
sed -i 's;"uid": 0;"uid": 1000;g' config.json
sed -i 's;"gid": 0;"gid": 100;g' config.json
# run hello-world
runc run test_hello
[ "$status" -eq 0 ]
# check expected output
[[ "${output}" == *"Hello"* ]]
}
@test "runc run with rootfs set to ." {
cp config.json rootfs/.
rm config.json
cd rootfs
sed -i 's;"rootfs";".";' config.json
# run hello-world
runc run test_hello
[ "$status" -eq 0 ]
[[ "${output}" == *"Hello"* ]]
}
@test "runc run --pid-file" {
# run hello-world
runc run --pid-file pid.txt test_hello
[ "$status" -eq 0 ]
[[ "${output}" == *"Hello"* ]]
# check pid.txt was generated
[ -e pid.txt ]
run cat pid.txt
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ [0-9]+ ]]
}

View file

@ -1,66 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "state (kill + delete)" {
runc state test_busybox
[ "$status" -ne 0 ]
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
runc kill test_busybox KILL
[ "$status" -eq 0 ]
# wait for busybox to be in the stopped state
retry 10 1 eval "__runc state test_busybox | grep -q 'stopped'"
# delete test_busybox
runc delete test_busybox
[ "$status" -eq 0 ]
runc state test_busybox
[ "$status" -ne 0 ]
}
@test "state (pause + resume)" {
# XXX: pause and resume require cgroups.
requires root
runc state test_busybox
[ "$status" -ne 0 ]
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# check state
testcontainer test_busybox running
# pause busybox
runc pause test_busybox
[ "$status" -eq 0 ]
# test state of busybox is paused
testcontainer test_busybox paused
# resume busybox
runc resume test_busybox
[ "$status" -eq 0 ]
# test state of busybox is back to running
testcontainer test_busybox running
}

View file

@ -1,230 +0,0 @@
#!/usr/bin/env bats
load helpers
function setup() {
teardown_busybox
setup_busybox
}
function teardown() {
teardown_busybox
}
@test "runc run [tty ptsname]" {
# Replace sh script with readlink.
sed -i 's|"sh"|"sh", "-c", "for file in /proc/self/fd/[012]; do readlink $file; done"|' config.json
# run busybox
runc run test_busybox
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ /dev/pts/+ ]]
[[ ${lines[1]} =~ /dev/pts/+ ]]
[[ ${lines[2]} =~ /dev/pts/+ ]]
}
@test "runc run [tty owner]" {
# tty chmod is not doable in rootless containers without idmap.
# TODO: this could be folded into the gid test.
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_idmap
# Replace sh script with stat.
sed -i 's/"sh"/"sh", "-c", "stat -c %u:%g $(tty) | tr : \\\\\\\\n"/' config.json
# run busybox
runc run test_busybox
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ 0 ]]
# This is set by the default config.json (it corresponds to the standard tty group).
[[ ${lines[1]} =~ 5 ]]
}
@test "runc run [tty owner] ({u,g}id != 0)" {
# tty chmod is not doable in rootless containers without idmap.
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_idmap
# replace "uid": 0 with "uid": 1000
# and do a similar thing for gid.
sed -i 's;"uid": 0;"uid": 1000;g' config.json
sed -i 's;"gid": 0;"gid": 100;g' config.json
# Replace sh script with stat.
sed -i 's/"sh"/"sh", "-c", "stat -c %u:%g $(tty) | tr : \\\\\\\\n"/' config.json
# run busybox
runc run test_busybox
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ 1000 ]]
# This is set by the default config.json (it corresponds to the standard tty group).
[[ ${lines[1]} =~ 5 ]]
}
@test "runc exec [tty ptsname]" {
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# make sure we're running
testcontainer test_busybox running
# run the exec
runc exec test_busybox sh -c 'for file in /proc/self/fd/[012]; do readlink $file; done'
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ /dev/pts/+ ]]
[[ ${lines[1]} =~ /dev/pts/+ ]]
[[ ${lines[2]} =~ /dev/pts/+ ]]
}
@test "runc exec [tty owner]" {
# tty chmod is not doable in rootless containers without idmap.
# TODO: this could be folded into the gid test.
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_idmap
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# make sure we're running
testcontainer test_busybox running
# run the exec
runc exec test_busybox sh -c 'stat -c %u:%g $(tty) | tr : \\n'
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ 0 ]]
[[ ${lines[1]} =~ 5 ]]
}
@test "runc exec [tty owner] ({u,g}id != 0)" {
# tty chmod is not doable in rootless containers without idmap.
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_idmap
# replace "uid": 0 with "uid": 1000
# and do a similar thing for gid.
sed -i 's;"uid": 0;"uid": 1000;g' config.json
sed -i 's;"gid": 0;"gid": 100;g' config.json
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# make sure we're running
testcontainer test_busybox running
# run the exec
runc exec test_busybox sh -c 'stat -c %u:%g $(tty) | tr : \\n'
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ 1000 ]]
[[ ${lines[1]} =~ 5 ]]
}
@test "runc exec [tty consolesize]" {
# allow writing to filesystem
sed -i 's/"readonly": true/"readonly": false/' config.json
# run busybox detached
runc run -d --console-socket $CONSOLE_SOCKET test_busybox
[ "$status" -eq 0 ]
# make sure we're running
testcontainer test_busybox running
tty_info_with_console_size=$( cat <<EOF
{
"terminal": true,
"consoleSize": {
"height": 10,
"width": 110
},
"args": [
"/bin/sh",
"-c",
"/bin/stty -a > /tmp/tty-info"
],
"cwd": "/"
}
EOF
)
# run the exec
runc exec --pid-file pid.txt -d --console-socket $CONSOLE_SOCKET -p <( echo $tty_info_with_console_size ) test_busybox
[ "$status" -eq 0 ]
# check the pid was generated
[ -e pid.txt ]
# wait for the user process to finish
timeout 1 tail --pid=$(head -n 1 pid.txt) -f /dev/null
tty_info=$( cat <<EOF
{
"args": [
"/bin/cat",
"/tmp/tty-info"
],
"cwd": "/"
}
EOF
)
# run the exec
runc exec -p <( echo $tty_info ) test_busybox
[ "$status" -eq 0 ]
# test tty width and height against original process.json
[[ ${lines[0]} =~ "rows 10; columns 110" ]]
}
@test "runc create [terminal=false]" {
# Disable terminal creation.
sed -i 's|"terminal": true,|"terminal": false,|g' config.json
# Replace sh script with sleep.
sed -i 's|"sh"|"sleep", "1000s"|' config.json
# Make sure that the handling of detached IO is done properly. See #1354.
__runc create test_busybox
# Start the command.
runc start test_busybox
[ "$status" -eq 0 ]
testcontainer test_busybox running
# Kill the container.
runc kill test_busybox KILL
[ "$status" -eq 0 ]
}
@test "runc run [terminal=false]" {
# Disable terminal creation.
sed -i 's|"terminal": true,|"terminal": false,|g' config.json
# Replace sh script with sleep.
sed -i 's|"sh"|"sleep", "1000s"|' config.json
# Make sure that the handling of non-detached IO is done properly. See #1354.
(
__runc run test_busybox
) &
wait_for_container 15 1 test_busybox
testcontainer test_busybox running
# Kill the container.
runc kill test_busybox KILL
[ "$status" -eq 0 ]
}
@test "runc run -d [terminal=false]" {
# Disable terminal creation.
sed -i 's|"terminal": true,|"terminal": false,|g' config.json
# Replace sh script with sleep.
sed -i 's|"sh"|"sleep", "1000s"|' config.json
# Make sure that the handling of detached IO is done properly. See #1354.
__runc run -d test_busybox
testcontainer test_busybox running
# Kill the container.
runc kill test_busybox KILL
[ "$status" -eq 0 ]
}

View file

@ -1,287 +0,0 @@
#!/usr/bin/env bats
load helpers
function teardown() {
rm -f $BATS_TMPDIR/runc-cgroups-integration-test.json
teardown_running_container test_update
teardown_running_container test_update_rt
teardown_busybox
}
function setup() {
teardown
setup_busybox
set_cgroups_path "$BUSYBOX_BUNDLE"
# Set some initial known values
DATA=$(cat <<EOF
"memory": {
"limit": 33554432,
"reservation": 25165824,
"kernel": 16777216,
"kernelTCP": 11534336
},
"cpu": {
"shares": 100,
"quota": 500000,
"period": 1000000,
"cpus": "0"
},
"blockio": {
"weight": 1000
},
"pids": {
"limit": 20
},
EOF
)
DATA=$(echo ${DATA} | sed 's/\n/\\n/g')
sed -i "s/\(\"resources\": {\)/\1\n${DATA}/" ${BUSYBOX_BUNDLE}/config.json
}
function check_cgroup_value() {
cgroup=$1
source=$2
expected=$3
current=$(cat $cgroup/$source)
[ "$current" == "$expected" ]
}
# TODO: test rt cgroup updating
@test "update" {
# XXX: This test should be split into separate sections so that we
# can skip kmem without skipping the update tests overall.
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
requires cgroups_kmem
# run a few busyboxes detached
runc run -d --console-socket $CONSOLE_SOCKET test_update
[ "$status" -eq 0 ]
# get the cgroup paths
for g in MEMORY CPUSET CPU BLKIO PIDS; do
base_path=$(grep "cgroup" /proc/self/mountinfo | gawk 'toupper($NF) ~ /\<'${g}'\>/ { print $5; exit }')
eval CGROUP_${g}="${base_path}${CGROUPS_PATH}"
done
CGROUP_SYSTEM_MEMORY=$(grep "cgroup" /proc/self/mountinfo | gawk 'toupper($NF) ~ /\<'MEMORY'\>/ { print $5; exit }')
# check that initial values were properly set
check_cgroup_value $CGROUP_BLKIO "blkio.weight" 1000
check_cgroup_value $CGROUP_CPU "cpu.cfs_period_us" 1000000
check_cgroup_value $CGROUP_CPU "cpu.cfs_quota_us" 500000
check_cgroup_value $CGROUP_CPU "cpu.shares" 100
check_cgroup_value $CGROUP_CPUSET "cpuset.cpus" 0
check_cgroup_value $CGROUP_MEMORY "memory.kmem.limit_in_bytes" 16777216
check_cgroup_value $CGROUP_MEMORY "memory.kmem.tcp.limit_in_bytes" 11534336
check_cgroup_value $CGROUP_MEMORY "memory.limit_in_bytes" 33554432
check_cgroup_value $CGROUP_MEMORY "memory.soft_limit_in_bytes" 25165824
check_cgroup_value $CGROUP_PIDS "pids.max" 20
# update blkio-weight
runc update test_update --blkio-weight 500
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_BLKIO "blkio.weight" 500
# update cpu-period
runc update test_update --cpu-period 900000
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_CPU "cpu.cfs_period_us" 900000
# update cpu-quota
runc update test_update --cpu-quota 600000
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_CPU "cpu.cfs_quota_us" 600000
# update cpu-shares
runc update test_update --cpu-share 200
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_CPU "cpu.shares" 200
# update cpuset if supported (i.e. we're running on a multicore cpu)
cpu_count=$(grep '^processor' /proc/cpuinfo | wc -l)
if [ $cpu_count -gt 1 ]; then
runc update test_update --cpuset-cpus "1"
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_CPUSET "cpuset.cpus" 1
fi
# update memory limit
runc update test_update --memory 67108864
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_MEMORY "memory.limit_in_bytes" 67108864
runc update test_update --memory 50M
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_MEMORY "memory.limit_in_bytes" 52428800
# update memory soft limit
runc update test_update --memory-reservation 33554432
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_MEMORY "memory.soft_limit_in_bytes" 33554432
# Run swap memory tests if swap is available
if [ -f "$CGROUP_MEMORY/memory.memsw.limit_in_bytes" ]; then
# try to remove memory swap limit
runc update test_update --memory-swap -1
[ "$status" -eq 0 ]
# Get System memory swap limit
SYSTEM_MEMORY_SW=$(cat "${CGROUP_SYSTEM_MEMORY}/memory.memsw.limit_in_bytes")
check_cgroup_value $CGROUP_MEMORY "memory.memsw.limit_in_bytes" ${SYSTEM_MEMORY_SW}
# update memory swap
runc update test_update --memory-swap 96468992
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_MEMORY "memory.memsw.limit_in_bytes" 96468992
fi;
# try to remove memory limit
runc update test_update --memory -1
[ "$status" -eq 0 ]
# Get System memory limit
SYSTEM_MEMORY=$(cat "${CGROUP_SYSTEM_MEMORY}/memory.limit_in_bytes")
# check that the memory limit is gone
check_cgroup_value $CGROUP_MEMORY "memory.limit_in_bytes" ${SYSTEM_MEMORY}
# check that the swap memory limit is gone
if [ -f "$CGROUP_MEMORY/memory.memsw.limit_in_bytes" ]; then
check_cgroup_value $CGROUP_MEMORY "memory.memsw.limit_in_bytes" ${SYSTEM_MEMORY}
fi
# update kernel memory limit
runc update test_update --kernel-memory 50331648
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_MEMORY "memory.kmem.limit_in_bytes" 50331648
# update kernel memory tcp limit
runc update test_update --kernel-memory-tcp 41943040
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_MEMORY "memory.kmem.tcp.limit_in_bytes" 41943040
# update pids limit
runc update test_update --pids-limit 10
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_PIDS "pids.max" 10
# Revert to the initial test values via JSON on stdin
runc update -r - test_update <<EOF
{
"memory": {
"limit": 33554432,
"reservation": 25165824,
"kernel": 16777216,
"kernelTCP": 11534336
},
"cpu": {
"shares": 100,
"quota": 500000,
"period": 1000000,
"cpus": "0"
},
"blockIO": {
"weight": 1000
},
"pids": {
"limit": 20
}
}
EOF
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_BLKIO "blkio.weight" 1000
check_cgroup_value $CGROUP_CPU "cpu.cfs_period_us" 1000000
check_cgroup_value $CGROUP_CPU "cpu.cfs_quota_us" 500000
check_cgroup_value $CGROUP_CPU "cpu.shares" 100
check_cgroup_value $CGROUP_CPUSET "cpuset.cpus" 0
check_cgroup_value $CGROUP_MEMORY "memory.kmem.limit_in_bytes" 16777216
check_cgroup_value $CGROUP_MEMORY "memory.kmem.tcp.limit_in_bytes" 11534336
check_cgroup_value $CGROUP_MEMORY "memory.limit_in_bytes" 33554432
check_cgroup_value $CGROUP_MEMORY "memory.soft_limit_in_bytes" 25165824
check_cgroup_value $CGROUP_PIDS "pids.max" 20
# redo all the changes at once
runc update test_update --blkio-weight 500 \
--cpu-period 900000 --cpu-quota 600000 --cpu-share 200 --memory 67108864 \
--memory-reservation 33554432 --kernel-memory 50331648 --kernel-memory-tcp 41943040 \
--pids-limit 10
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_BLKIO "blkio.weight" 500
check_cgroup_value $CGROUP_CPU "cpu.cfs_period_us" 900000
check_cgroup_value $CGROUP_CPU "cpu.cfs_quota_us" 600000
check_cgroup_value $CGROUP_CPU "cpu.shares" 200
check_cgroup_value $CGROUP_MEMORY "memory.kmem.limit_in_bytes" 50331648
check_cgroup_value $CGROUP_MEMORY "memory.kmem.tcp.limit_in_bytes" 41943040
check_cgroup_value $CGROUP_MEMORY "memory.limit_in_bytes" 67108864
check_cgroup_value $CGROUP_MEMORY "memory.soft_limit_in_bytes" 33554432
check_cgroup_value $CGROUP_PIDS "pids.max" 10
# reset to the initial test values via a JSON file
DATA=$(cat <<"EOF"
{
"memory": {
"limit": 33554432,
"reservation": 25165824,
"kernel": 16777216,
"kernelTCP": 11534336
},
"cpu": {
"shares": 100,
"quota": 500000,
"period": 1000000,
"cpus": "0"
},
"blockIO": {
"weight": 1000
},
"pids": {
"limit": 20
}
}
EOF
)
echo $DATA > $BATS_TMPDIR/runc-cgroups-integration-test.json
runc update -r $BATS_TMPDIR/runc-cgroups-integration-test.json test_update
[ "$status" -eq 0 ]
check_cgroup_value $CGROUP_BLKIO "blkio.weight" 1000
check_cgroup_value $CGROUP_CPU "cpu.cfs_period_us" 1000000
check_cgroup_value $CGROUP_CPU "cpu.cfs_quota_us" 500000
check_cgroup_value $CGROUP_CPU "cpu.shares" 100
check_cgroup_value $CGROUP_CPUSET "cpuset.cpus" 0
check_cgroup_value $CGROUP_MEMORY "memory.kmem.limit_in_bytes" 16777216
check_cgroup_value $CGROUP_MEMORY "memory.kmem.tcp.limit_in_bytes" 11534336
check_cgroup_value $CGROUP_MEMORY "memory.limit_in_bytes" 33554432
check_cgroup_value $CGROUP_MEMORY "memory.soft_limit_in_bytes" 25165824
check_cgroup_value $CGROUP_PIDS "pids.max" 20
}
@test "update rt period and runtime" {
[[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
requires cgroups_kmem cgroups_rt
# run a detached busybox
runc run -d --console-socket $CONSOLE_SOCKET test_update_rt
[ "$status" -eq 0 ]
# get the cgroup paths
eval CGROUP_CPU="${CGROUP_CPU_BASE_PATH}${CGROUPS_PATH}"
runc update -r - test_update_rt <<EOF
{
"cpu": {
"realtimePeriod": 800001,
"realtimeRuntime": 500001
}
}
EOF
check_cgroup_value $CGROUP_CPU "cpu.rt_period_us" 800001
check_cgroup_value $CGROUP_CPU "cpu.rt_runtime_us" 500001
runc update test_update_rt --cpu-rt-period 900001 --cpu-rt-runtime 600001
check_cgroup_value $CGROUP_CPU "cpu.rt_period_us" 900001
check_cgroup_value $CGROUP_CPU "cpu.rt_runtime_us" 600001
}

View file

@ -1,11 +0,0 @@
#!/usr/bin/env bats
load helpers
@test "runc version" {
runc -v
[ "$status" -eq 0 ]
[[ ${lines[0]} =~ runc\ version\ [0-9]+\.[0-9]+\.[0-9]+ ]]
[[ ${lines[1]} =~ commit:+ ]]
[[ ${lines[2]} =~ spec:\ [0-9]+\.[0-9]+\.[0-9]+ ]]
}

View file

@ -1,125 +0,0 @@
#!/bin/bash
# Copyright (C) 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# rootless.sh -- Runner for rootless container tests. The purpose of this
# script is to allow for the addition (and testing) of "opportunistic" features
# to rootless containers while still testing the base features. In order to add
# a new feature, please match the existing style. Add an entry to $ALL_FEATURES,
# and add an enable_* and disable_* hook.
ALL_FEATURES=("idmap" "cgroup")
ROOT="$(readlink -f "$(dirname "${BASH_SOURCE}")/..")"
# FEATURE: Opportunistic new{uid,gid}map support, allowing a rootless container
# to be set up with the usage of helper setuid binaries.
function enable_idmap() {
export ROOTLESS_UIDMAP_START=100000 ROOTLESS_UIDMAP_LENGTH=65536
export ROOTLESS_GIDMAP_START=200000 ROOTLESS_GIDMAP_LENGTH=65536
# Set up sub{uid,gid} mappings.
[ -e /etc/subuid.tmp ] && mv /etc/subuid{.tmp,}
( grep -v '^rootless' /etc/subuid ; echo "rootless:$ROOTLESS_UIDMAP_START:$ROOTLESS_UIDMAP_LENGTH" ) > /etc/subuid.tmp
mv /etc/subuid{.tmp,}
[ -e /etc/subgid.tmp ] && mv /etc/subgid{.tmp,}
( grep -v '^rootless' /etc/subgid ; echo "rootless:$ROOTLESS_GIDMAP_START:$ROOTLESS_GIDMAP_LENGTH" ) > /etc/subgid.tmp
mv /etc/subgid{.tmp,}
# Reactivate new{uid,gid}map helpers if applicable.
[ -e /usr/bin/unused-newuidmap ] && mv /usr/bin/{unused-,}newuidmap
[ -e /usr/bin/unused-newgidmap ] && mv /usr/bin/{unused-,}newgidmap
}
function disable_idmap() {
export ROOTLESS_UIDMAP_START ROOTLESS_UIDMAP_LENGTH
export ROOTLESS_GIDMAP_START ROOTLESS_GIDMAP_LENGTH
# Deactivate sub{uid,gid} mappings.
[ -e /etc/subuid ] && mv /etc/subuid{,.tmp}
[ -e /etc/subgid ] && mv /etc/subgid{,.tmp}
# Deactivate new{uid,gid}map helpers. setuid is preserved with mv(1).
[ -e /usr/bin/newuidmap ] && mv /usr/bin/{,unused-}newuidmap
[ -e /usr/bin/newgidmap ] && mv /usr/bin/{,unused-}newgidmap
}
# FEATURE: Opportunistic cgroups support, allowing a rootless container to set
# resource limits on condition that cgroupsPath is set to a path the
# rootless user has permissions on.
# List of cgroups. We handle name= cgroups as well as combined
# (comma-separated) cgroups and correctly split and/or strip them.
ALL_CGROUPS=( $(cat /proc/self/cgroup | cut -d: -f2 | sed -E '{s/^name=//;s/,/\n/;/^$/D}') )
CGROUP_MOUNT="/sys/fs/cgroup"
CGROUP_PATH="/runc-cgroups-integration-test"
function enable_cgroup() {
# Set up cgroups for use in rootless containers.
for cg in "${ALL_CGROUPS[@]}"
do
mkdir -p "$CGROUP_MOUNT/$cg$CGROUP_PATH"
# We only need to allow write access to {cgroup.procs,tasks} and the
# directory. Rather than changing the owner entirely, we just change
# the group and then allow write access to the group (in order to
# further limit the possible DAC permissions that runc could use).
chown root:rootless "$CGROUP_MOUNT/$cg$CGROUP_PATH/"{,cgroup.procs,tasks}
chmod g+rwx "$CGROUP_MOUNT/$cg$CGROUP_PATH/"{,cgroup.procs,tasks}
# Due to cpuset's semantics we need to give extra permissions to allow
# for runc to set up the hierarchy. XXX: This really shouldn't be
# necessary, and might actually be a bug in our impl of cgroup
# handling.
[[ "$cg" == "cpuset" ]] && chown rootless:rootless "$CGROUP_MOUNT/$cg$CGROUP_PATH/cpuset."{cpus,mems}
done
}
function disable_cgroup() {
# Remove cgroups used in rootless containers.
for cg in "${ALL_CGROUPS[@]}"
do
[ -d "$CGROUP_MOUNT/$cg$CGROUP_PATH" ] && rmdir "$CGROUP_MOUNT/$cg$CGROUP_PATH"
done
}
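# Purely illustrative sketch of adding a new opportunistic feature, following
# the pattern described in the header comment; "somefeature" is a made-up name
# and these hooks are not part of the original script:
#   ALL_FEATURES=("idmap" "cgroup" "somefeature")
#   function enable_somefeature() {
#     # set up host state and export any ROOTLESS_* variables the tests rely on
#     :
#   }
#   function disable_somefeature() {
#     # undo whatever enable_somefeature changed
#     :
#   }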
# Create a powerset of $ALL_FEATURES (the set of all subsets of $ALL_FEATURES).
# We test all of the possible combinations (as long as we don't add too many
# feature knobs this shouldn't take too long -- but the number of tested
# combinations is O(2^n)).
function powerset() {
eval printf '%s' $(printf '{,%s+}' "$@"):
}
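# For ALL_FEATURES=("idmap" "cgroup") the eval'd brace expansion above yields
# ":cgroup+:idmap+:idmap+cgroup+:" -- the empty set plus every feature
# combination, colon-separated. Expansion sketch (for illustration only):
#   printf '{,%s+}' idmap cgroup          # -> {,idmap+}{,cgroup+}
#   eval printf '%s' {,idmap+}{,cgroup+}: # -> :cgroup+:idmap+:idmap+cgroup+: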
features_powerset="$(powerset "${ALL_FEATURES[@]}")"
# Iterate over the powerset of all features.
IFS=:
for enabled_features in $features_powerset
do
idx="$(($idx+1))"
echo "[$(printf '%.2d' "$idx")] run rootless tests ... (${enabled_features%%+})"
unset IFS
for feature in "${ALL_FEATURES[@]}"
do
hook_func="disable_$feature"
grep -E "(^|\+)$feature(\+|$)" <<<$enabled_features &>/dev/null && hook_func="enable_$feature"
"$hook_func"
done
# Run the test suite!
set -e
echo path: $PATH
export ROOTLESS_FEATURES="$enabled_features"
sudo -HE -u rootless PATH="$PATH" bats -t "$ROOT/tests/integration$TESTFLAGS"
set +e
done