Update specs and runc dependencies
Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
commit a46c45d05d (parent 319af0765b)
120 changed files with 746 additions and 10857 deletions
@@ -14,8 +14,8 @@ clone git github.com/docker/go-units master
 clone git github.com/godbus/dbus master
 clone git github.com/golang/glog master
 clone git github.com/golang/protobuf master
-clone git github.com/opencontainers/runc master
-clone git github.com/opencontainers/specs master
+clone git github.com/opencontainers/runc 7b6c4c418d5090f4f11eee949fdf49afd15838c9
+clone git github.com/opencontainers/specs a1e32a8ead2ba57adce3e36e956b4dc32c1b85c4
 clone git github.com/rcrowley/go-metrics master
 clone git github.com/satori/go.uuid master
 clone git github.com/syndtr/gocapability master
@@ -1,104 +0,0 @@
package pubsub

import (
    "sync"
    "time"
)

// NewPublisher creates a new pub/sub publisher to broadcast messages.
// The duration is used as the send timeout as to not block the publisher publishing
// messages to other clients if one client is slow or unresponsive.
// The buffer is used when creating new channels for subscribers.
func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher {
    return &Publisher{
        buffer:      buffer,
        timeout:     publishTimeout,
        subscribers: make(map[subscriber]topicFunc),
    }
}

type subscriber chan interface{}
type topicFunc func(v interface{}) bool

// Publisher is basic pub/sub structure. Allows to send events and subscribe
// to them. Can be safely used from multiple goroutines.
type Publisher struct {
    m           sync.RWMutex
    buffer      int
    timeout     time.Duration
    subscribers map[subscriber]topicFunc
}

// Len returns the number of subscribers for the publisher
func (p *Publisher) Len() int {
    p.m.RLock()
    i := len(p.subscribers)
    p.m.RUnlock()
    return i
}

// Subscribe adds a new subscriber to the publisher returning the channel.
func (p *Publisher) Subscribe() chan interface{} {
    return p.SubscribeTopic(nil)
}

// SubscribeTopic adds a new subscriber that filters messages sent by a topic.
func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} {
    ch := make(chan interface{}, p.buffer)
    p.m.Lock()
    p.subscribers[ch] = topic
    p.m.Unlock()
    return ch
}

// Evict removes the specified subscriber from receiving any more messages.
func (p *Publisher) Evict(sub chan interface{}) {
    p.m.Lock()
    delete(p.subscribers, sub)
    close(sub)
    p.m.Unlock()
}

// Publish sends the data in v to all subscribers currently registered with the publisher.
func (p *Publisher) Publish(v interface{}) {
    p.m.RLock()
    wg := new(sync.WaitGroup)
    for sub, topic := range p.subscribers {
        wg.Add(1)
        go p.sendTopic(sub, topic, v, wg)
    }
    wg.Wait()
    p.m.RUnlock()
}

// Close closes the channels to all subscribers registered with the publisher.
func (p *Publisher) Close() {
    p.m.Lock()
    for sub := range p.subscribers {
        delete(p.subscribers, sub)
        close(sub)
    }
    p.m.Unlock()
}

func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) {
    defer wg.Done()
    if topic != nil && !topic(v) {
        return
    }

    // send under a select as to not block if the receiver is unavailable
    if p.timeout > 0 {
        select {
        case sub <- v:
        case <-time.After(p.timeout):
        }
        return
    }

    select {
    case sub <- v:
    default:
    }
}
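For orientation, a short usage sketch of the Publisher API removed above, written as a Go example function; it is illustrative only and not part of the deleted file.

```go
package pubsub

import (
    "fmt"
    "time"
)

// ExamplePublisher shows broadcast plus a topic-filtered subscriber.
func ExamplePublisher() {
    p := NewPublisher(100*time.Millisecond, 10)
    defer p.Close()

    all := p.Subscribe()
    errsOnly := p.SubscribeTopic(func(v interface{}) bool {
        s, ok := v.(string)
        return ok && s == "error"
    })

    p.Publish("info")
    p.Publish("error")

    fmt.Println(<-all, <-all) // the unfiltered channel sees both messages
    fmt.Println(<-errsOnly)   // the filtered channel sees only "error"
    // Output:
    // info error
    // error
}
```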
@@ -1,2 +0,0 @@
vendor/pkg
runc
@ -1,117 +0,0 @@
|
|||
## Contribution Guidelines
|
||||
|
||||
### Pull requests are always welcome
|
||||
|
||||
We are always thrilled to receive pull requests, and do our best to
|
||||
process them as fast as possible. Not sure if that typo is worth a pull
|
||||
request? Do it! We will appreciate it.
|
||||
|
||||
If your pull request is not accepted on the first try, don't be
|
||||
discouraged! If there's a problem with the implementation, hopefully you
|
||||
received feedback on what to improve.
|
||||
|
||||
We're trying very hard to keep runc lean and focused. We don't want it
|
||||
to do everything for everybody. This means that we might decide against
|
||||
incorporating a new feature. However, there might be a way to implement
|
||||
that feature *on top of* runc.
|
||||
|
||||
|
||||
### Conventions
|
||||
|
||||
Fork the repo and make changes on your fork in a feature branch:
|
||||
|
||||
- If it's a bugfix branch, name it XXX-something where XXX is the number of the
|
||||
issue
|
||||
- If it's a feature branch, create an enhancement issue to announce your
|
||||
intentions, and name it XXX-something where XXX is the number of the issue.
|
||||
|
||||
Submit unit tests for your changes. Go has a great test framework built in; use
|
||||
it! Take a look at existing tests for inspiration. Run the full test suite on
|
||||
your branch before submitting a pull request.
|
||||
|
||||
Update the documentation when creating or modifying features. Test
|
||||
your documentation changes for clarity, concision, and correctness, as
|
||||
well as a clean documentation build. See ``docs/README.md`` for more
|
||||
information on building the docs and how docs get released.
|
||||
|
||||
Write clean code. Universally formatted code promotes ease of writing, reading,
|
||||
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
|
||||
committing your changes. Most editors have plugins that do this automatically.
|
||||
|
||||
Pull requests descriptions should be as clear as possible and include a
|
||||
reference to all the issues that they address.
|
||||
|
||||
Pull requests must not contain commits from other users or branches.
|
||||
|
||||
Commit messages must start with a capitalized and short summary (max. 50
|
||||
chars) written in the imperative, followed by an optional, more detailed
|
||||
explanatory text which is separated from the summary by an empty line.
|
||||
|
||||
Code review comments may be added to your pull request. Discuss, then make the
|
||||
suggested modifications and push additional commits to your feature branch. Be
|
||||
sure to post a comment after pushing. The new commits will show up in the pull
|
||||
request automatically, but the reviewers will not be notified unless you
|
||||
comment.
|
||||
|
||||
Before the pull request is merged, make sure that you squash your commits into
|
||||
logical units of work using `git rebase -i` and `git push -f`. After every
|
||||
commit the test suite should be passing. Include documentation changes in the
|
||||
same commit so that a revert would remove all traces of the feature or fix.
|
||||
|
||||
Commits that fix or close an issue should include a reference like `Closes #XXX`
|
||||
or `Fixes #XXX`, which will automatically close the issue when merged.
|
||||
|
||||
### Sign your work
|
||||
|
||||
The sign-off is a simple line at the end of the explanation for the
|
||||
patch, which certifies that you wrote it or otherwise have the right to
|
||||
pass it on as an open-source patch. The rules are pretty simple: if you
|
||||
can certify the below (from
|
||||
[developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
then you just add a line to every git commit message:
|
||||
|
||||
Signed-off-by: Joe Smith <joe@gmail.com>
|
||||
|
||||
using your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||
|
||||
You can add the sign off when creating the git commit via `git commit -s`.
|
|
@@ -1,7 +0,0 @@
Michael Crosby <michael@docker.com> (@crosbymichael)
Rohit Jnagal <jnagal@google.com> (@rjnagal)
Victor Marmol <vmarmol@google.com> (@vmarmol)
Mrunal Patel <mpatel@redhat.com> (@mrunalp)
Alexander Morozov <lk4d4@docker.com> (@LK4D4)
Daniel, Dao Quang Minh <dqminh89@gmail.com> (@dqminh)
Andrey Vagin <avagin@virtuozzo.com> (@avagin)
@ -1,120 +0,0 @@
|
|||
## Introduction
|
||||
|
||||
Dear maintainer. Thank you for investing the time and energy to help
|
||||
make runc as useful as possible. Maintaining a project is difficult,
|
||||
sometimes unrewarding work. Sure, you will get to contribute cool
|
||||
features to the project. But most of your time will be spent reviewing,
|
||||
cleaning up, documenting, answering questions, justifying design
|
||||
decisions - while everyone has all the fun! But remember - the quality
|
||||
of the maintainers work is what distinguishes the good projects from the
|
||||
great. So please be proud of your work, even the unglamourous parts,
|
||||
and encourage a culture of appreciation and respect for *every* aspect
|
||||
of improving the project - not just the hot new features.
|
||||
|
||||
This document is a manual for maintainers old and new. It explains what
|
||||
is expected of maintainers, how they should work, and what tools are
|
||||
available to them.
|
||||
|
||||
This is a living document - if you see something out of date or missing,
|
||||
speak up!
|
||||
|
||||
## What are a maintainer's responsibility?
|
||||
|
||||
It is every maintainer's responsibility to:
|
||||
|
||||
* 1) Expose a clear roadmap for improving their component.
|
||||
* 2) Deliver prompt feedback and decisions on pull requests.
|
||||
* 3) Be available to anyone with questions, bug reports, criticism etc.
|
||||
on their component. This includes IRC and GitHub issues and pull requests.
|
||||
* 4) Make sure their component respects the philosophy, design and
|
||||
roadmap of the project.
|
||||
|
||||
## How are decisions made?
|
||||
|
||||
Short answer: with pull requests to the runc repository.
|
||||
|
||||
runc is an open-source project with an open design philosophy. This
|
||||
means that the repository is the source of truth for EVERY aspect of the
|
||||
project, including its philosophy, design, roadmap and APIs. *If it's
|
||||
part of the project, it's in the repo. It's in the repo, it's part of
|
||||
the project.*
|
||||
|
||||
As a result, all decisions can be expressed as changes to the
|
||||
repository. An implementation change is a change to the source code. An
|
||||
API change is a change to the API specification. A philosophy change is
|
||||
a change to the philosophy manifesto. And so on.
|
||||
|
||||
All decisions affecting runc, big and small, follow the same 3 steps:
|
||||
|
||||
* Step 1: Open a pull request. Anyone can do this.
|
||||
|
||||
* Step 2: Discuss the pull request. Anyone can do this.
|
||||
|
||||
* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do
|
||||
this (see below "Who decides what?")
|
||||
|
||||
### I'm a maintainer, should I make pull requests too?
|
||||
|
||||
Yes. Nobody should ever push to master directly. All changes should be
|
||||
made through a pull request.
|
||||
|
||||
## Who decides what?
|
||||
|
||||
All decisions are pull requests, and the relevant maintainers make
|
||||
decisions by accepting or refusing the pull request. Review and acceptance
|
||||
by anyone is denoted by adding a comment in the pull request: `LGTM`.
|
||||
However, only currently listed `MAINTAINERS` are counted towards the required
|
||||
two LGTMs.
|
||||
|
||||
Overall the maintainer system works because of mutual respect across the
|
||||
maintainers of the project. The maintainers trust one another to make decisions
|
||||
in the best interests of the project. Sometimes maintainers can disagree and
|
||||
this is part of a healthy project to represent the point of views of various people.
|
||||
In the case where maintainers cannot find agreement on a specific change the
|
||||
role of a Chief Maintainer comes into play.
|
||||
|
||||
The Chief Maintainer for the project is responsible for overall architecture
|
||||
of the project to maintain conceptual integrity. Large decisions and
|
||||
architecture changes should be reviewed by the chief maintainer.
|
||||
The current chief maintainer for the project is Michael Crosby (@crosbymichael).
|
||||
|
||||
Even though the maintainer system is built on trust, if there is a conflict
|
||||
with the chief maintainer on a decision, their decision can be challenged
|
||||
and brought to the technical oversight board if two-thirds of the
|
||||
maintainers vote for an appeal. It is expected that this would be a
|
||||
very exceptional event.
|
||||
|
||||
|
||||
### How are maintainers added?
|
||||
|
||||
The best maintainers have a vested interest in the project. Maintainers
|
||||
are first and foremost contributors that have shown they are committed to
|
||||
the long term success of the project. Contributors wanting to become
|
||||
maintainers are expected to be deeply involved in contributing code,
|
||||
pull request review, and triage of issues in the project for more than two months.
|
||||
|
||||
Just contributing does not make you a maintainer, it is about building trust
|
||||
with the current maintainers of the project and being a person that they can
|
||||
depend on and trust to make decisions in the best interest of the project. The
|
||||
final vote to add a new maintainer should be approved by over 66% of the current
|
||||
maintainers with the chief maintainer having veto power. In case of a veto,
|
||||
conflict resolution rules expressed above apply. The voting period is
|
||||
five business days on the Pull Request to add the new maintainer.
|
||||
|
||||
|
||||
### What is expected of maintainers?
|
||||
|
||||
Part of a healthy project is to have active maintainers to support the community
|
||||
in contributions and perform tasks to keep the project running. Maintainers are
|
||||
expected to be able to respond in a timely manner if their help is required on specific
|
||||
issues where they are pinged. Being a maintainer is a time consuming commitment and should
|
||||
not be taken lightly.
|
||||
|
||||
When a maintainer is unable to perform the required duties they can be removed with
|
||||
a vote by 66% of the current maintainers with the chief maintainer having veto power.
|
||||
The voting period is ten business days. Issues related to a maintainer's performance should
|
||||
be discussed with them among the other maintainers so that they are not surprised by
|
||||
a pull request removing them.
|
||||
|
||||
|
||||
|
|
@@ -1,40 +0,0 @@
RUNC_TEST_IMAGE=runc_test
PROJECT=github.com/opencontainers/runc
TEST_DOCKERFILE=script/test_Dockerfile
BUILDTAGS=seccomp
export GOPATH:=$(CURDIR)/Godeps/_workspace:$(GOPATH)

all:
	go build -tags "$(BUILDTAGS)" -o runc .

static:
	CGO_ENABLED=1 go build -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static" -o runc .

vet:
	go get golang.org/x/tools/cmd/vet

lint: vet
	go vet ./...
	go fmt ./...

runctestimage:
	docker build -t $(RUNC_TEST_IMAGE) -f $(TEST_DOCKERFILE) .

test: runctestimage
	docker run -e TESTFLAGS --privileged --rm -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_TEST_IMAGE) make localtest

localtest:
	go test -tags "$(BUILDTAGS)" ${TESTFLAGS} -v ./...

install:
	cp runc /usr/local/bin/runc

clean:
	rm runc

validate: vet
	script/validate-gofmt
	go vet ./...

ci: validate localtest
17 vendor/src/github.com/opencontainers/runc/NOTICE vendored
@@ -1,17 +0,0 @@
runc

Copyright 2012-2015 Docker, Inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).

The following is courtesy of our legal counsel:


Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
@@ -1,19 +0,0 @@
# runc principles

In the design and development of runc and libcontainer we try to follow these principles:

(Work in progress)

* Don't try to replace every tool. Instead, be an ingredient to improve them.
* Less code is better.
* Fewer components are better. Do you really need to add one more class?
* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand.
* Don't do later what you can do now. "//TODO: refactor" is not acceptable in new code.
* When hesitating between two options, choose the one that is easier to reverse.
* "No" is temporary; "Yes" is forever. If you're not sure about a new feature, say no. You can change your mind later.
* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable.
* The fewer moving parts in a container, the better.
* Don't merge it unless you document it.
* Don't document it unless you can keep it up-to-date.
* Don't merge it unless you test it!
* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that.
144 vendor/src/github.com/opencontainers/runc/README.md vendored
@ -1,144 +0,0 @@
|
|||
[![Build Status](https://jenkins.dockerproject.org/buildStatus/icon?job=runc Master)](https://jenkins.dockerproject.org/job/runc Master)
|
||||
|
||||
## runc
|
||||
|
||||
`runc` is a CLI tool for spawning and running containers according to the OCF specification.
|
||||
|
||||
## State of the project
|
||||
|
||||
Currently `runc` is an implementation of the OCI specification. We are currently sprinting
|
||||
to have a v1 of the spec out. So the `runc` config format will be constantly changing until
|
||||
the spec is finalized. However, we encourage you to try out the tool and give feedback.
|
||||
|
||||
### OCF
|
||||
|
||||
How does `runc` integrate with the Open Container Initiative Specification?
|
||||
`runc` depends on the types specified in the
|
||||
[specs](https://github.com/opencontainers/specs) repository. Whenever the
|
||||
specification is updated and ready to be versioned `runc` will update its dependency
|
||||
on the specs repository and support the update spec.
|
||||
|
||||
### Building:
|
||||
|
||||
At the time of writing, runc only builds on the Linux platform.
|
||||
|
||||
```bash
|
||||
# create a 'github.com/opencontainers' in your GOPATH/src
|
||||
cd github.com/opencontainers
|
||||
git clone https://github.com/opencontainers/runc
|
||||
cd runc
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
In order to enable seccomp support you will need to install libseccomp on your platform.
|
||||
If you do not with to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make.
|
||||
|
||||
#### Build Tags
|
||||
|
||||
`runc` supports optional build tags for compiling in support for various features.
|
||||
|
||||
|
||||
| Build Tag | Feature | Dependency |
|
||||
|-----------|------------------------------------|-------------|
|
||||
| seccomp | Syscall filtering | libseccomp |
|
||||
| selinux | selinux process and mount labeling | <none> |
|
||||
| apparmor | apparmor profile support | libapparmor |
|
||||
|
||||
### Testing:
|
||||
|
||||
You can run tests for runC by using command:
|
||||
|
||||
```bash
|
||||
# make test
|
||||
```
|
||||
|
||||
Note that test cases are run in Docker container, so you need to install
|
||||
`docker` first. And test requires mounting cgroups inside container, it's
|
||||
done by docker now, so you need a docker version newer than 1.8.0-rc2.
|
||||
|
||||
You can also run specific test cases by:
|
||||
|
||||
```bash
|
||||
# make test TESTFLAGS="-run=SomeTestFunction"
|
||||
```
|
||||
|
||||
### Using:
|
||||
|
||||
To run a container with the id "test", execute `runc start` with the containers id as arg one
|
||||
in the bundle's root directory:
|
||||
|
||||
```bash
|
||||
runc start test
|
||||
/ $ ps
|
||||
PID USER COMMAND
|
||||
1 daemon sh
|
||||
5 daemon sh
|
||||
/ $
|
||||
```
|
||||
|
||||
### OCI Container JSON Format:
|
||||
|
||||
OCI container JSON format is based on OCI [specs](https://github.com/opencontainers/specs).
|
||||
You can generate JSON files by using `runc spec`.
|
||||
It assumes that the file-system is found in a directory called
|
||||
`rootfs` and there is a user with uid and gid of `0` defined within that file-system.
|
||||
|
||||
### Examples:
|
||||
|
||||
#### Using a Docker image (requires version 1.3 or later)
|
||||
|
||||
To test using Docker's `busybox` image follow these steps:
|
||||
* Install `docker` and download the `busybox` image: `docker pull busybox`
|
||||
* Create a container from that image and export its contents to a tar file:
|
||||
`docker export $(docker create busybox) > busybox.tar`
|
||||
* Untar the contents to create your filesystem directory:
|
||||
```
|
||||
mkdir rootfs
|
||||
tar -C rootfs -xf busybox.tar
|
||||
```
|
||||
* Create `config.json` by using `runc spec`.
|
||||
* Execute `runc start` and you should be placed into a shell where you can run `ps`:
|
||||
```
|
||||
$ runc start test
|
||||
/ # ps
|
||||
PID USER COMMAND
|
||||
1 root sh
|
||||
9 root ps
|
||||
```
|
||||
|
||||
#### Using runc with systemd
|
||||
|
||||
To use runc with systemd, you can create a unit file
|
||||
`/usr/lib/systemd/system/minecraft.service` as below (edit your
|
||||
own Description or WorkingDirectory or service name as you need).
|
||||
|
||||
```service
|
||||
[Unit]
|
||||
Description=Minecraft Build Server
|
||||
Documentation=http://minecraft.net
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
CPUQuota=200%
|
||||
MemoryLimit=1536M
|
||||
ExecStart=/usr/local/bin/runc start minecraft
|
||||
Restart=on-failure
|
||||
WorkingDirectory=/containers/minecraftbuild
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
Make sure you have the bundle's root directory and JSON configs in
|
||||
your WorkingDirectory, then use systemd commands to start the service:
|
||||
|
||||
```bash
|
||||
systemctl daemon-reload
|
||||
systemctl start minecraft.service
|
||||
```
|
||||
|
||||
Note that if you use JSON configs by `runc spec`, you need to modify
|
||||
`config.json` and change `process.terminal` to false so runc won't
|
||||
create tty, because we can't set terminal from the stdin when using
|
||||
systemd service.
|
|
@@ -1,84 +0,0 @@
// +build linux

package main

import (
    "fmt"
    "strconv"
    "strings"

    "github.com/codegangsta/cli"
    "github.com/opencontainers/runc/libcontainer"
)

var checkpointCommand = cli.Command{
    Name:  "checkpoint",
    Usage: "checkpoint a running container",
    Flags: []cli.Flag{
        cli.StringFlag{Name: "image-path", Value: "", Usage: "path for saving criu image files"},
        cli.StringFlag{Name: "work-path", Value: "", Usage: "path for saving work files and logs"},
        cli.BoolFlag{Name: "leave-running", Usage: "leave the process running after checkpointing"},
        cli.BoolFlag{Name: "tcp-established", Usage: "allow open tcp connections"},
        cli.BoolFlag{Name: "ext-unix-sk", Usage: "allow external unix sockets"},
        cli.BoolFlag{Name: "shell-job", Usage: "allow shell jobs"},
        cli.StringFlag{Name: "page-server", Value: "", Usage: "ADDRESS:PORT of the page server"},
        cli.BoolFlag{Name: "file-locks", Usage: "handle file locks, for safety"},
        cli.StringFlag{Name: "manage-cgroups-mode", Value: "", Usage: "cgroups mode: 'soft' (default), 'full' and 'strict'."},
    },
    Action: func(context *cli.Context) {
        container, err := getContainer(context)
        if err != nil {
            fatal(err)
        }
        defer destroy(container)
        options := criuOptions(context)
        // these are the mandatory criu options for a container
        setPageServer(context, options)
        setManageCgroupsMode(context, options)
        if err := container.Checkpoint(options); err != nil {
            fatal(err)
        }
    },
}

func getCheckpointImagePath(context *cli.Context) string {
    imagePath := context.String("image-path")
    if imagePath == "" {
        imagePath = getDefaultImagePath(context)
    }
    return imagePath
}

func setPageServer(context *cli.Context, options *libcontainer.CriuOpts) {
    // xxx following criu opts are optional
    // The dump image can be sent to a criu page server
    if psOpt := context.String("page-server"); psOpt != "" {
        addressPort := strings.Split(psOpt, ":")
        if len(addressPort) != 2 {
            fatal(fmt.Errorf("Use --page-server ADDRESS:PORT to specify page server"))
        }
        portInt, err := strconv.Atoi(addressPort[1])
        if err != nil {
            fatal(fmt.Errorf("Invalid port number"))
        }
        options.PageServer = libcontainer.CriuPageServerInfo{
            Address: addressPort[0],
            Port:    int32(portInt),
        }
    }
}

func setManageCgroupsMode(context *cli.Context, options *libcontainer.CriuOpts) {
    if cgOpt := context.String("manage-cgroups-mode"); cgOpt != "" {
        switch cgOpt {
        case "soft":
            options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_SOFT
        case "full":
            options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_FULL
        case "strict":
            options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_STRICT
        default:
            fatal(fmt.Errorf("Invalid manage cgroups mode"))
        }
    }
}
@@ -1,15 +0,0 @@
package main

import "github.com/codegangsta/cli"

var deleteCommand = cli.Command{
    Name:  "delete",
    Usage: "delete any resources held by the container often used with detached containers",
    Action: func(context *cli.Context) {
        container, err := getContainer(context)
        if err != nil {
            fatal(err)
        }
        destroy(container)
    },
}
@ -1,95 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
)
|
||||
|
||||
// event struct for encoding the event data to json.
|
||||
type event struct {
|
||||
Type string `json:"type"`
|
||||
ID string `json:"id"`
|
||||
Data interface{} `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
var eventsCommand = cli.Command{
|
||||
Name: "events",
|
||||
Usage: "display container events such as OOM notifications, cpu, memory, IO and network stats",
|
||||
Flags: []cli.Flag{
|
||||
cli.DurationFlag{Name: "interval", Value: 5 * time.Second, Usage: "set the stats collection interval"},
|
||||
cli.BoolFlag{Name: "stats", Usage: "display the container's stats then exit"},
|
||||
},
|
||||
Action: func(context *cli.Context) {
|
||||
container, err := getContainer(context)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
var (
|
||||
stats = make(chan *libcontainer.Stats, 1)
|
||||
events = make(chan *event, 1024)
|
||||
group = &sync.WaitGroup{}
|
||||
)
|
||||
group.Add(1)
|
||||
go func() {
|
||||
defer group.Done()
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
for e := range events {
|
||||
if err := enc.Encode(e); err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
if context.Bool("stats") {
|
||||
s, err := container.Stats()
|
||||
if err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
events <- &event{Type: "stats", ID: container.ID(), Data: s}
|
||||
close(events)
|
||||
group.Wait()
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
for range time.Tick(context.Duration("interval")) {
|
||||
s, err := container.Stats()
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
stats <- s
|
||||
}
|
||||
}()
|
||||
n, err := container.NotifyOOM()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case _, ok := <-n:
|
||||
if ok {
|
||||
// this means an oom event was received, if it is !ok then
|
||||
// the channel was closed because the container stopped and
|
||||
// the cgroups no longer exist.
|
||||
events <- &event{Type: "oom", ID: container.ID()}
|
||||
} else {
|
||||
n = nil
|
||||
}
|
||||
case s := <-stats:
|
||||
events <- &event{Type: "stats", ID: container.ID(), Data: s}
|
||||
}
|
||||
if n == nil {
|
||||
close(events)
|
||||
break
|
||||
}
|
||||
}
|
||||
group.Wait()
|
||||
},
|
||||
}
|
140 vendor/src/github.com/opencontainers/runc/exec.go vendored
@ -1,140 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/opencontainers/specs"
|
||||
)
|
||||
|
||||
var execCommand = cli.Command{
|
||||
Name: "exec",
|
||||
Usage: "execute new process inside the container",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "console",
|
||||
Usage: "specify the pty slave path for use with the container",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cwd",
|
||||
Usage: "current working directory in the container",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "env, e",
|
||||
Usage: "set environment variables",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "tty, t",
|
||||
Usage: "allocate a pseudo-TTY",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "user, u",
|
||||
Usage: "UID (format: <uid>[:<gid>])",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "process,p",
|
||||
Usage: "path to the process.json",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "detach,d",
|
||||
Usage: "detach from the container's process",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "pid-file",
|
||||
Value: "",
|
||||
Usage: "specify the file to write the process id to",
|
||||
},
|
||||
},
|
||||
Action: func(context *cli.Context) {
|
||||
if os.Geteuid() != 0 {
|
||||
logrus.Fatal("runc should be run as root")
|
||||
}
|
||||
status, err := execProcess(context)
|
||||
if err != nil {
|
||||
logrus.Fatalf("exec failed: %v", err)
|
||||
}
|
||||
os.Exit(status)
|
||||
},
|
||||
}
|
||||
|
||||
func execProcess(context *cli.Context) (int, error) {
|
||||
container, err := getContainer(context)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
var (
|
||||
detach = context.Bool("detach")
|
||||
rootfs = container.Config().Rootfs
|
||||
)
|
||||
|
||||
p, err := getProcess(context, path.Dir(rootfs))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return runProcess(container, p, nil, context.String("console"), context.String("pid-file"), detach)
|
||||
|
||||
}
|
||||
|
||||
func getProcess(context *cli.Context, bundle string) (*specs.Process, error) {
|
||||
if path := context.String("process"); path != "" {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
var p specs.Process
|
||||
if err := json.NewDecoder(f).Decode(&p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &p, nil
|
||||
}
|
||||
// process via cli flags
|
||||
if err := os.Chdir(bundle); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
spec, err := loadSpec(specConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p := spec.Process
|
||||
p.Args = context.Args()[1:]
|
||||
// override the cwd, if passed
|
||||
if context.String("cwd") != "" {
|
||||
p.Cwd = context.String("cwd")
|
||||
}
|
||||
// append the passed env variables
|
||||
for _, e := range context.StringSlice("env") {
|
||||
p.Env = append(p.Env, e)
|
||||
}
|
||||
// set the tty
|
||||
if context.IsSet("tty") {
|
||||
p.Terminal = context.Bool("tty")
|
||||
}
|
||||
// override the user, if passed
|
||||
if context.String("user") != "" {
|
||||
u := strings.SplitN(context.String("user"), ":", 2)
|
||||
if len(u) > 1 {
|
||||
gid, err := strconv.Atoi(u[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing %s as int for gid failed: %v", u[1], err)
|
||||
}
|
||||
p.User.GID = uint32(gid)
|
||||
}
|
||||
uid, err := strconv.Atoi(u[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing %s as int for uid failed: %v", u[0], err)
|
||||
}
|
||||
p.User.UID = uint32(uid)
|
||||
}
|
||||
return &p, nil
|
||||
}
|
|
@@ -1,87 +0,0 @@
// +build linux

package main

import (
    "fmt"
    "strconv"
    "strings"
    "syscall"

    "github.com/codegangsta/cli"
)

var signalMap = map[string]syscall.Signal{
    "ABRT":   syscall.SIGABRT,
    "ALRM":   syscall.SIGALRM,
    "BUS":    syscall.SIGBUS,
    "CHLD":   syscall.SIGCHLD,
    "CLD":    syscall.SIGCLD,
    "CONT":   syscall.SIGCONT,
    "FPE":    syscall.SIGFPE,
    "HUP":    syscall.SIGHUP,
    "ILL":    syscall.SIGILL,
    "INT":    syscall.SIGINT,
    "IO":     syscall.SIGIO,
    "IOT":    syscall.SIGIOT,
    "KILL":   syscall.SIGKILL,
    "PIPE":   syscall.SIGPIPE,
    "POLL":   syscall.SIGPOLL,
    "PROF":   syscall.SIGPROF,
    "PWR":    syscall.SIGPWR,
    "QUIT":   syscall.SIGQUIT,
    "SEGV":   syscall.SIGSEGV,
    "STKFLT": syscall.SIGSTKFLT,
    "STOP":   syscall.SIGSTOP,
    "SYS":    syscall.SIGSYS,
    "TERM":   syscall.SIGTERM,
    "TRAP":   syscall.SIGTRAP,
    "TSTP":   syscall.SIGTSTP,
    "TTIN":   syscall.SIGTTIN,
    "TTOU":   syscall.SIGTTOU,
    "UNUSED": syscall.SIGUNUSED,
    "URG":    syscall.SIGURG,
    "USR1":   syscall.SIGUSR1,
    "USR2":   syscall.SIGUSR2,
    "VTALRM": syscall.SIGVTALRM,
    "WINCH":  syscall.SIGWINCH,
    "XCPU":   syscall.SIGXCPU,
    "XFSZ":   syscall.SIGXFSZ,
}

var killCommand = cli.Command{
    Name:  "kill",
    Usage: "kill sends the specified signal (default: SIGTERM) to the container's init process",
    Action: func(context *cli.Context) {
        container, err := getContainer(context)
        if err != nil {
            fatal(err)
        }

        sigstr := context.Args().Get(1)
        if sigstr == "" {
            sigstr = "SIGTERM"
        }

        signal, err := parseSignal(sigstr)
        if err != nil {
            fatal(err)
        }

        if err := container.Signal(signal); err != nil {
            fatal(err)
        }
    },
}

func parseSignal(rawSignal string) (syscall.Signal, error) {
    s, err := strconv.Atoi(rawSignal)
    if err == nil {
        return syscall.Signal(s), nil
    }
    signal, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
    if !ok {
        return -1, fmt.Errorf("unknown signal %q", rawSignal)
    }
    return signal, nil
}
@@ -76,7 +76,7 @@ config := &configs.Config{
     Name:   "test-container",
     Parent: "system",
     Resources: &configs.Resources{
-        MemorySwappiness: -1,
+        MemorySwappiness: nil,
         AllowAllDevices:  false,
         AllowedDevices:   configs.DefaultAllowedDevices,
     },
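The documentation hunk above tracks the switch from a -1 sentinel to a nil pointer for an unset swappiness value. A minimal sketch of that pointer-based pattern, with the concrete field type assumed for illustration rather than taken from the vendored configs package:

```go
package main

import "fmt"

// Resources mirrors only the field under discussion; the real libcontainer
// struct is larger and its exact pointer type is assumed here.
type Resources struct {
    // nil means "leave the kernel default"; a non-nil value is what gets
    // written to memory.swappiness.
    MemorySwappiness *int64
}

func swappiness(v int64) *int64 { return &v }

func main() {
    unset := Resources{MemorySwappiness: nil}
    pinned := Resources{MemorySwappiness: swappiness(60)}

    for _, r := range []Resources{unset, pinned} {
        if r.MemorySwappiness == nil {
            fmt.Println("memory.swappiness: not set")
        } else {
            fmt.Println("memory.swappiness:", *r.MemorySwappiness)
        }
    }
}
```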
@@ -1,18 +0,0 @@
// +build linux

package cgroups

import (
    "testing"
)

func TestParseCgroups(t *testing.T) {
    cgroups, err := ParseCgroupFile("/proc/self/cgroup")
    if err != nil {
        t.Fatal(err)
    }

    if _, ok := cgroups["cpu"]; !ok {
        t.Fail()
    }
}
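The deleted test exercises ParseCgroupFile against /proc/self/cgroup and expects a "cpu" entry. For orientation, a minimal sketch of a parser over that file's `hierarchy-id:controllers:path` line format; it is an illustrative stand-in, not the vendored implementation:

```go
package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

// parseCgroupFile returns a map from controller name to cgroup path,
// which is the shape the deleted test relies on.
func parseCgroupFile(path string) (map[string]string, error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer f.Close()

    out := make(map[string]string)
    s := bufio.NewScanner(f)
    for s.Scan() {
        // Each line looks like "4:cpu,cpuacct:/user.slice".
        parts := strings.SplitN(s.Text(), ":", 3)
        if len(parts) != 3 {
            continue
        }
        for _, sub := range strings.Split(parts[1], ",") {
            out[sub] = parts[2]
        }
    }
    return out, s.Err()
}

func main() {
    cg, err := parseCgroupFile("/proc/self/cgroup")
    if err != nil {
        panic(err)
    }
    fmt.Println("cpu cgroup:", cg["cpu"])
}
```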
@@ -14,6 +14,7 @@ import (
 
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/opencontainers/runc/libcontainer/configs"
+	libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
 )
 
 var (
@@ -30,6 +31,7 @@ var (
 		&NetPrioGroup{},
 		&PerfEventGroup{},
 		&FreezerGroup{},
+		&NameGroup{GroupName: "name=systemd", Join: true},
 	}
 	CgroupProcesses  = "cgroup.procs"
 	HugePageSizes, _ = cgroups.GetHugePageSize()
@@ -128,12 +130,9 @@ func (m *Manager) Apply(pid int) (err error) {
 		return cgroups.EnterPid(m.Paths, pid)
 	}
-
-	m.mu.Lock()
-	defer m.mu.Unlock()
 	paths := make(map[string]string)
 	defer func() {
 		if err != nil {
 			cgroups.RemovePaths(paths)
 		}
 	}()
 	for _, sys := range subsystems {
 		if err := sys.Apply(d); err != nil {
 			return err
@@ -276,14 +275,19 @@ func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
 		return nil, fmt.Errorf("cgroup: either Path or Name and Parent should be used")
 	}
 
-	innerPath := c.Path
+	// XXX: Do not remove this code. Path safety is important! -- cyphar
+	cgPath := libcontainerUtils.CleanPath(c.Path)
+	cgParent := libcontainerUtils.CleanPath(c.Parent)
+	cgName := libcontainerUtils.CleanPath(c.Name)
+
+	innerPath := cgPath
 	if innerPath == "" {
-		innerPath = filepath.Join(c.Parent, c.Name)
+		innerPath = filepath.Join(cgParent, cgName)
 	}
 
 	return &cgroupData{
 		root:      root,
-		innerPath: c.Path,
+		innerPath: innerPath,
 		config:    c,
 		pid:       pid,
 	}, nil
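The getCgroupData hunk routes the caller-supplied Path, Parent, and Name through CleanPath before joining them, so relative components cannot climb out of the cgroup hierarchy. A minimal sketch of that kind of sanitization using only the standard library; the behavior of the vendored helper is assumed, not reproduced:

```go
package main

import (
    "fmt"
    "path/filepath"
)

// cleanPath resolves "." and ".." so a relative cgroup path stays inside
// whatever parent it is later joined onto. Illustrative only.
func cleanPath(p string) string {
    if p == "" {
        return ""
    }
    p = filepath.Clean(p)
    if !filepath.IsAbs(p) {
        // Clean relative paths as if rooted at "/", then re-relativize,
        // which drops any leading "../" components.
        p = filepath.Clean(string(filepath.Separator) + p)
        p, _ = filepath.Rel(string(filepath.Separator), p)
    }
    return filepath.Clean(p)
}

func main() {
    for _, in := range []string{"../../escape", "docker/ctr", "/system.slice/../foo"} {
        fmt.Printf("%-22q -> %q\n", in, cleanPath(in))
    }
    // Joined onto a cgroup root, the cleaned path cannot escape it.
    fmt.Println(filepath.Join("/sys/fs/cgroup/memory", cleanPath("../../etc")))
}
```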
@ -1,636 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
const (
|
||||
sectorsRecursiveContents = `8:0 1024`
|
||||
serviceBytesRecursiveContents = `8:0 Read 100
|
||||
8:0 Write 200
|
||||
8:0 Sync 300
|
||||
8:0 Async 500
|
||||
8:0 Total 500
|
||||
Total 500`
|
||||
servicedRecursiveContents = `8:0 Read 10
|
||||
8:0 Write 40
|
||||
8:0 Sync 20
|
||||
8:0 Async 30
|
||||
8:0 Total 50
|
||||
Total 50`
|
||||
queuedRecursiveContents = `8:0 Read 1
|
||||
8:0 Write 4
|
||||
8:0 Sync 2
|
||||
8:0 Async 3
|
||||
8:0 Total 5
|
||||
Total 5`
|
||||
serviceTimeRecursiveContents = `8:0 Read 173959
|
||||
8:0 Write 0
|
||||
8:0 Sync 0
|
||||
8:0 Async 173959
|
||||
8:0 Total 17395
|
||||
Total 17395`
|
||||
waitTimeRecursiveContents = `8:0 Read 15571
|
||||
8:0 Write 0
|
||||
8:0 Sync 0
|
||||
8:0 Async 15571
|
||||
8:0 Total 15571`
|
||||
mergedRecursiveContents = `8:0 Read 5
|
||||
8:0 Write 10
|
||||
8:0 Sync 0
|
||||
8:0 Async 0
|
||||
8:0 Total 15
|
||||
Total 15`
|
||||
timeRecursiveContents = `8:0 8`
|
||||
throttleServiceBytes = `8:0 Read 11030528
|
||||
8:0 Write 23
|
||||
8:0 Sync 42
|
||||
8:0 Async 11030528
|
||||
8:0 Total 11030528
|
||||
252:0 Read 11030528
|
||||
252:0 Write 23
|
||||
252:0 Sync 42
|
||||
252:0 Async 11030528
|
||||
252:0 Total 11030528
|
||||
Total 22061056`
|
||||
throttleServiced = `8:0 Read 164
|
||||
8:0 Write 23
|
||||
8:0 Sync 42
|
||||
8:0 Async 164
|
||||
8:0 Total 164
|
||||
252:0 Read 164
|
||||
252:0 Write 23
|
||||
252:0 Sync 42
|
||||
252:0 Async 164
|
||||
252:0 Total 164
|
||||
Total 328`
|
||||
)
|
||||
|
||||
func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) {
|
||||
*blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op})
|
||||
}
|
||||
|
||||
func TestBlkioSetWeight(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
weightBefore = 100
|
||||
weightAfter = 200
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.weight": strconv.Itoa(weightBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.config.Resources.BlkioWeight = weightAfter
|
||||
blkio := &BlkioGroup{}
|
||||
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "blkio.weight")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse blkio.weight - %s", err)
|
||||
}
|
||||
|
||||
if value != weightAfter {
|
||||
t.Fatal("Got the wrong value, set blkio.weight failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioSetWeightDevice(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
weightDeviceBefore = "8:0 400"
|
||||
)
|
||||
|
||||
wd := configs.NewWeightDevice(8, 0, 500, 0)
|
||||
weightDeviceAfter := wd.WeightString()
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.weight_device": weightDeviceBefore,
|
||||
})
|
||||
|
||||
helper.CgroupData.config.Resources.BlkioWeightDevice = []*configs.WeightDevice{wd}
|
||||
blkio := &BlkioGroup{}
|
||||
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamString(helper.CgroupPath, "blkio.weight_device")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse blkio.weight_device - %s", err)
|
||||
}
|
||||
|
||||
if value != weightDeviceAfter {
|
||||
t.Fatal("Got the wrong value, set blkio.weight_device failed.")
|
||||
}
|
||||
}
|
||||
|
||||
// regression #274
|
||||
func TestBlkioSetMultipleWeightDevice(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
weightDeviceBefore = "8:0 400"
|
||||
)
|
||||
|
||||
wd1 := configs.NewWeightDevice(8, 0, 500, 0)
|
||||
wd2 := configs.NewWeightDevice(8, 16, 500, 0)
|
||||
// we cannot actually set and check both because normal ioutil.WriteFile
|
||||
// when writing to cgroup file will overwrite the whole file content instead
|
||||
// of updating it as the kernel is doing. Just check the second device
|
||||
// is present will suffice for the test to ensure multiple writes are done.
|
||||
weightDeviceAfter := wd2.WeightString()
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.weight_device": weightDeviceBefore,
|
||||
})
|
||||
|
||||
helper.CgroupData.config.Resources.BlkioWeightDevice = []*configs.WeightDevice{wd1, wd2}
|
||||
blkio := &BlkioGroup{}
|
||||
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamString(helper.CgroupPath, "blkio.weight_device")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse blkio.weight_device - %s", err)
|
||||
}
|
||||
|
||||
if value != weightDeviceAfter {
|
||||
t.Fatal("Got the wrong value, set blkio.weight_device failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify expected stats.
|
||||
expectedStats := cgroups.BlkioStats{}
|
||||
appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 17395, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 5, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 10, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 15, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoTimeRecursive, 8, 0, 8, "")
|
||||
|
||||
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoSectorsFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoServiceBytesFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoServicedFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoQueuedFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoServiceTimeFile(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatalf("Failed unexpectedly: %s", err)
	}
}

func TestBlkioStatsNoWaitTimeFile(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
		"blkio.io_serviced_recursive": servicedRecursiveContents,
		"blkio.io_queued_recursive": queuedRecursiveContents,
		"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
		"blkio.io_merged_recursive": mergedRecursiveContents,
		"blkio.time_recursive": timeRecursiveContents,
		"blkio.sectors_recursive": sectorsRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatalf("Failed unexpectedly: %s", err)
	}
}

func TestBlkioStatsNoMergedFile(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
		"blkio.io_serviced_recursive": servicedRecursiveContents,
		"blkio.io_queued_recursive": queuedRecursiveContents,
		"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
		"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
		"blkio.time_recursive": timeRecursiveContents,
		"blkio.sectors_recursive": sectorsRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatalf("Failed unexpectedly: %s", err)
	}
}

func TestBlkioStatsNoTimeFile(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
		"blkio.io_serviced_recursive": servicedRecursiveContents,
		"blkio.io_queued_recursive": queuedRecursiveContents,
		"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
		"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
		"blkio.io_merged_recursive": mergedRecursiveContents,
		"blkio.sectors_recursive": sectorsRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatalf("Failed unexpectedly: %s", err)
	}
}

func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": "8:0 Read 100 100",
		"blkio.io_serviced_recursive": servicedRecursiveContents,
		"blkio.io_queued_recursive": queuedRecursiveContents,
		"blkio.sectors_recursive": sectorsRecursiveContents,
		"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
		"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
		"blkio.io_merged_recursive": mergedRecursiveContents,
		"blkio.time_recursive": timeRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err == nil {
		t.Fatal("Expected to fail, but did not")
	}
}

func TestBlkioStatsUnexpectedFieldType(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": "8:0 Read Write",
		"blkio.io_serviced_recursive": servicedRecursiveContents,
		"blkio.io_queued_recursive": queuedRecursiveContents,
		"blkio.sectors_recursive": sectorsRecursiveContents,
		"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
		"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
		"blkio.io_merged_recursive": mergedRecursiveContents,
		"blkio.time_recursive": timeRecursiveContents,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err == nil {
		t.Fatal("Expected to fail, but did not")
	}
}

func TestNonCFQBlkioStats(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()
	helper.writeFileContents(map[string]string{
		"blkio.io_service_bytes_recursive": "",
		"blkio.io_serviced_recursive": "",
		"blkio.io_queued_recursive": "",
		"blkio.sectors_recursive": "",
		"blkio.io_service_time_recursive": "",
		"blkio.io_wait_time_recursive": "",
		"blkio.io_merged_recursive": "",
		"blkio.time_recursive": "",
		"blkio.throttle.io_service_bytes": throttleServiceBytes,
		"blkio.throttle.io_serviced": throttleServiced,
	})

	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(helper.CgroupPath, &actualStats)
	if err != nil {
		t.Fatal(err)
	}

	// Verify expected stats.
	expectedStats := cgroups.BlkioStats{}

	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Read")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 23, "Write")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 42, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Async")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Total")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Read")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 23, "Write")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 42, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Async")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Total")

	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Read")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 23, "Write")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 42, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Async")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Total")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Read")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 23, "Write")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 42, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Async")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Total")

	expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}

func TestBlkioSetThrottleReadBpsDevice(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()

	const (
		throttleBefore = `8:0 1024`
	)

	td := configs.NewThrottleDevice(8, 0, 2048)
	throttleAfter := td.String()

	helper.writeFileContents(map[string]string{
		"blkio.throttle.read_bps_device": throttleBefore,
	})

	helper.CgroupData.config.Resources.BlkioThrottleReadBpsDevice = []*configs.ThrottleDevice{td}
	blkio := &BlkioGroup{}
	if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_bps_device")
	if err != nil {
		t.Fatalf("Failed to parse blkio.throttle.read_bps_device - %s", err)
	}

	if value != throttleAfter {
		t.Fatal("Got the wrong value, set blkio.throttle.read_bps_device failed.")
	}
}
func TestBlkioSetThrottleWriteBpsDevice(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()

	const (
		throttleBefore = `8:0 1024`
	)

	td := configs.NewThrottleDevice(8, 0, 2048)
	throttleAfter := td.String()

	helper.writeFileContents(map[string]string{
		"blkio.throttle.write_bps_device": throttleBefore,
	})

	helper.CgroupData.config.Resources.BlkioThrottleWriteBpsDevice = []*configs.ThrottleDevice{td}
	blkio := &BlkioGroup{}
	if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_bps_device")
	if err != nil {
		t.Fatalf("Failed to parse blkio.throttle.write_bps_device - %s", err)
	}

	if value != throttleAfter {
		t.Fatal("Got the wrong value, set blkio.throttle.write_bps_device failed.")
	}
}
func TestBlkioSetThrottleReadIOpsDevice(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()

	const (
		throttleBefore = `8:0 1024`
	)

	td := configs.NewThrottleDevice(8, 0, 2048)
	throttleAfter := td.String()

	helper.writeFileContents(map[string]string{
		"blkio.throttle.read_iops_device": throttleBefore,
	})

	helper.CgroupData.config.Resources.BlkioThrottleReadIOPSDevice = []*configs.ThrottleDevice{td}
	blkio := &BlkioGroup{}
	if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_iops_device")
	if err != nil {
		t.Fatalf("Failed to parse blkio.throttle.read_iops_device - %s", err)
	}

	if value != throttleAfter {
		t.Fatal("Got the wrong value, set blkio.throttle.read_iops_device failed.")
	}
}
func TestBlkioSetThrottleWriteIOpsDevice(t *testing.T) {
	helper := NewCgroupTestUtil("blkio", t)
	defer helper.cleanup()

	const (
		throttleBefore = `8:0 1024`
	)

	td := configs.NewThrottleDevice(8, 0, 2048)
	throttleAfter := td.String()

	helper.writeFileContents(map[string]string{
		"blkio.throttle.write_iops_device": throttleBefore,
	})

	helper.CgroupData.config.Resources.BlkioThrottleWriteIOPSDevice = []*configs.ThrottleDevice{td}
	blkio := &BlkioGroup{}
	if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_iops_device")
	if err != nil {
		t.Fatalf("Failed to parse blkio.throttle.write_iops_device - %s", err)
	}

	if value != throttleAfter {
		t.Fatal("Got the wrong value, set blkio.throttle.write_iops_device failed.")
	}
}

@ -1,163 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
func TestCpuSetShares(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
sharesBefore = 1024
|
||||
sharesAfter = 512
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpu.shares": strconv.Itoa(sharesBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.config.Resources.CpuShares = sharesAfter
|
||||
cpu := &CpuGroup{}
|
||||
if err := cpu.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "cpu.shares")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpu.shares - %s", err)
|
||||
}
|
||||
|
||||
if value != sharesAfter {
|
||||
t.Fatal("Got the wrong value, set cpu.shares failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCpuSetBandWidth(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
quotaBefore = 8000
|
||||
quotaAfter = 5000
|
||||
periodBefore = 10000
|
||||
periodAfter = 7000
|
||||
rtRuntimeBefore = 8000
|
||||
rtRuntimeAfter = 5000
|
||||
rtPeriodBefore = 10000
|
||||
rtPeriodAfter = 7000
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpu.cfs_quota_us": strconv.Itoa(quotaBefore),
|
||||
"cpu.cfs_period_us": strconv.Itoa(periodBefore),
|
||||
"cpu.rt_runtime_us": strconv.Itoa(rtRuntimeBefore),
|
||||
"cpu.rt_period_us": strconv.Itoa(rtPeriodBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.config.Resources.CpuQuota = quotaAfter
|
||||
helper.CgroupData.config.Resources.CpuPeriod = periodAfter
|
||||
helper.CgroupData.config.Resources.CpuRtRuntime = rtRuntimeAfter
|
||||
helper.CgroupData.config.Resources.CpuRtPeriod = rtPeriodAfter
|
||||
cpu := &CpuGroup{}
|
||||
if err := cpu.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
quota, err := getCgroupParamUint(helper.CgroupPath, "cpu.cfs_quota_us")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpu.cfs_quota_us - %s", err)
|
||||
}
|
||||
if quota != quotaAfter {
|
||||
t.Fatal("Got the wrong value, set cpu.cfs_quota_us failed.")
|
||||
}
|
||||
|
||||
period, err := getCgroupParamUint(helper.CgroupPath, "cpu.cfs_period_us")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpu.cfs_period_us - %s", err)
|
||||
}
|
||||
if period != periodAfter {
|
||||
t.Fatal("Got the wrong value, set cpu.cfs_period_us failed.")
|
||||
}
|
||||
rtRuntime, err := getCgroupParamUint(helper.CgroupPath, "cpu.rt_runtime_us")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpu.rt_runtime_us - %s", err)
|
||||
}
|
||||
if rtRuntime != rtRuntimeAfter {
|
||||
t.Fatal("Got the wrong value, set cpu.rt_runtime_us failed.")
|
||||
}
|
||||
rtPeriod, err := getCgroupParamUint(helper.CgroupPath, "cpu.rt_period_us")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpu.rt_period_us - %s", err)
|
||||
}
|
||||
if rtPeriod != rtPeriodAfter {
|
||||
t.Fatal("Got the wrong value, set cpu.rt_period_us failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCpuStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
kNrPeriods = 2000
|
||||
kNrThrottled = 200
|
||||
kThrottledTime = uint64(18446744073709551615)
|
||||
)
|
||||
|
||||
cpuStatContent := fmt.Sprintf("nr_periods %d\n nr_throttled %d\n throttled_time %d\n",
|
||||
kNrPeriods, kNrThrottled, kThrottledTime)
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpu.stat": cpuStatContent,
|
||||
})
|
||||
|
||||
cpu := &CpuGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedStats := cgroups.ThrottlingData{
|
||||
Periods: kNrPeriods,
|
||||
ThrottledPeriods: kNrThrottled,
|
||||
ThrottledTime: kThrottledTime}
|
||||
|
||||
expectThrottlingDataEquals(t, expectedStats, actualStats.CpuStats.ThrottlingData)
|
||||
}
|
||||
|
||||
func TestNoCpuStatFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
cpu := &CpuGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal("Expected not to fail, but did")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidCpuStat(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
cpuStatContent := `nr_periods 2000
|
||||
nr_throttled 200
|
||||
throttled_time fortytwo`
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpu.stat": cpuStatContent,
|
||||
})
|
||||
|
||||
cpu := &CpuGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failed stat parsing.")
|
||||
}
|
||||
}
|
|
@@ -1,65 +0,0 @@
// +build linux

package fs

import (
	"testing"
)

func TestCpusetSetCpus(t *testing.T) {
	helper := NewCgroupTestUtil("cpuset", t)
	defer helper.cleanup()

	const (
		cpusBefore = "0"
		cpusAfter = "1-3"
	)

	helper.writeFileContents(map[string]string{
		"cpuset.cpus": cpusBefore,
	})

	helper.CgroupData.config.Resources.CpusetCpus = cpusAfter
	cpuset := &CpusetGroup{}
	if err := cpuset.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "cpuset.cpus")
	if err != nil {
		t.Fatalf("Failed to parse cpuset.cpus - %s", err)
	}

	if value != cpusAfter {
		t.Fatal("Got the wrong value, set cpuset.cpus failed.")
	}
}

func TestCpusetSetMems(t *testing.T) {
	helper := NewCgroupTestUtil("cpuset", t)
	defer helper.cleanup()

	const (
		memsBefore = "0"
		memsAfter = "1"
	)

	helper.writeFileContents(map[string]string{
		"cpuset.mems": memsBefore,
	})

	helper.CgroupData.config.Resources.CpusetMems = memsAfter
	cpuset := &CpusetGroup{}
	if err := cpuset.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "cpuset.mems")
	if err != nil {
		t.Fatalf("Failed to parse cpuset.mems - %s", err)
	}

	if value != memsAfter {
		t.Fatal("Got the wrong value, set cpuset.mems failed.")
	}
}

@@ -5,6 +5,7 @@ package fs
import (
	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/configs"
	"github.com/opencontainers/runc/libcontainer/system"
)

type DevicesGroup struct {
@@ -25,6 +26,10 @@ func (s *DevicesGroup) Apply(d *cgroupData) error {
}

func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
	if system.RunningInUserNS() {
		return nil
	}

	devices := cgroup.Resources.Devices
	if len(devices) > 0 {
		for _, dev := range devices {
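
Note on the guard added above: inside a user namespace the devices cgroup files are owned by the host, so writing devices.allow and devices.deny would fail, and Set now simply returns early. The check behind system.RunningInUserNS() is commonly based on /proc/self/uid_map; the sketch below is only an illustrative approximation of that idea, not the exact runc code.

package sketch

import (
	"fmt"
	"os"
)

// runningInUserNS is an illustrative approximation of the check used by
// system.RunningInUserNS: if /proc/self/uid_map shows the full identity
// mapping "0 0 4294967295", we assume the initial (host) user namespace.
func runningInUserNS() bool {
	f, err := os.Open("/proc/self/uid_map")
	if err != nil {
		// No uid_map usually means user namespaces are not in play.
		return false
	}
	defer f.Close()

	var inside, outside, count int64
	if _, err := fmt.Fscan(f, &inside, &outside, &count); err != nil {
		return false
	}
	// Anything other than the full identity mapping means a user namespace.
	return !(inside == 0 && outside == 0 && count == 4294967295)
}
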
@@ -1,84 +0,0 @@
// +build linux

package fs

import (
	"testing"

	"github.com/opencontainers/runc/libcontainer/configs"
)

var (
	allowedDevices = []*configs.Device{
		{
			Path: "/dev/zero",
			Type: 'c',
			Major: 1,
			Minor: 5,
			Permissions: "rwm",
			FileMode: 0666,
		},
	}
	allowedList = "c 1:5 rwm"
	deniedDevices = []*configs.Device{
		{
			Path: "/dev/null",
			Type: 'c',
			Major: 1,
			Minor: 3,
			Permissions: "rwm",
			FileMode: 0666,
		},
	}
	deniedList = "c 1:3 rwm"
)

func TestDevicesSetAllow(t *testing.T) {
	helper := NewCgroupTestUtil("devices", t)
	defer helper.cleanup()

	helper.writeFileContents(map[string]string{
		"devices.deny": "a",
	})

	helper.CgroupData.config.Resources.AllowAllDevices = false
	helper.CgroupData.config.Resources.AllowedDevices = allowedDevices
	devices := &DevicesGroup{}
	if err := devices.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "devices.allow")
	if err != nil {
		t.Fatalf("Failed to parse devices.allow - %s", err)
	}

	if value != allowedList {
		t.Fatal("Got the wrong value, set devices.allow failed.")
	}
}

func TestDevicesSetDeny(t *testing.T) {
	helper := NewCgroupTestUtil("devices", t)
	defer helper.cleanup()

	helper.writeFileContents(map[string]string{
		"devices.allow": "a",
	})

	helper.CgroupData.config.Resources.AllowAllDevices = true
	helper.CgroupData.config.Resources.DeniedDevices = deniedDevices
	devices := &DevicesGroup{}
	if err := devices.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "devices.deny")
	if err != nil {
		t.Fatalf("Failed to parse devices.deny - %s", err)
	}

	if value != deniedList {
		t.Fatal("Got the wrong value, set devices.deny failed.")
	}
}

@@ -1,47 +0,0 @@
// +build linux

package fs

import (
	"testing"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func TestFreezerSetState(t *testing.T) {
	helper := NewCgroupTestUtil("freezer", t)
	defer helper.cleanup()

	helper.writeFileContents(map[string]string{
		"freezer.state": string(configs.Frozen),
	})

	helper.CgroupData.config.Resources.Freezer = configs.Thawed
	freezer := &FreezerGroup{}
	if err := freezer.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "freezer.state")
	if err != nil {
		t.Fatalf("Failed to parse freezer.state - %s", err)
	}
	if value != string(configs.Thawed) {
		t.Fatal("Got the wrong value, set freezer.state failed.")
	}
}

func TestFreezerSetInvalidState(t *testing.T) {
	helper := NewCgroupTestUtil("freezer", t)
	defer helper.cleanup()

	const (
		invalidArg configs.FreezerState = "Invalid"
	)

	helper.CgroupData.config.Resources.Freezer = invalidArg
	freezer := &FreezerGroup{}
	if err := freezer.Set(helper.CgroupPath, helper.CgroupData.config); err == nil {
		t.Fatal("Failed to return invalid argument error")
	}
}

@ -1,154 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
const (
|
||||
hugetlbUsageContents = "128\n"
|
||||
hugetlbMaxUsageContents = "256\n"
|
||||
hugetlbFailcnt = "100\n"
|
||||
)
|
||||
|
||||
var (
|
||||
usage = "hugetlb.%s.usage_in_bytes"
|
||||
limit = "hugetlb.%s.limit_in_bytes"
|
||||
maxUsage = "hugetlb.%s.max_usage_in_bytes"
|
||||
failcnt = "hugetlb.%s.failcnt"
|
||||
)
|
||||
|
||||
func TestHugetlbSetHugetlb(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
hugetlbBefore = 256
|
||||
hugetlbAfter = 512
|
||||
)
|
||||
|
||||
for _, pageSize := range HugePageSizes {
|
||||
helper.writeFileContents(map[string]string{
|
||||
fmt.Sprintf(limit, pageSize): strconv.Itoa(hugetlbBefore),
|
||||
})
|
||||
}
|
||||
|
||||
for _, pageSize := range HugePageSizes {
|
||||
helper.CgroupData.config.Resources.HugetlbLimit = []*configs.HugepageLimit{
|
||||
{
|
||||
Pagesize: pageSize,
|
||||
Limit: hugetlbAfter,
|
||||
},
|
||||
}
|
||||
hugetlb := &HugetlbGroup{}
|
||||
if err := hugetlb.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, pageSize := range HugePageSizes {
|
||||
limit := fmt.Sprintf(limit, pageSize)
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, limit)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse %s - %s", limit, err)
|
||||
}
|
||||
if value != hugetlbAfter {
|
||||
t.Fatalf("Set hugetlb.limit_in_bytes failed. Expected: %v, Got: %v", hugetlbAfter, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHugetlbStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
for _, pageSize := range HugePageSizes {
|
||||
helper.writeFileContents(map[string]string{
|
||||
fmt.Sprintf(usage, pageSize): hugetlbUsageContents,
|
||||
fmt.Sprintf(maxUsage, pageSize): hugetlbMaxUsageContents,
|
||||
fmt.Sprintf(failcnt, pageSize): hugetlbFailcnt,
|
||||
})
|
||||
}
|
||||
|
||||
hugetlb := &HugetlbGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expectedStats := cgroups.HugetlbStats{Usage: 128, MaxUsage: 256, Failcnt: 100}
|
||||
for _, pageSize := range HugePageSizes {
|
||||
expectHugetlbStatEquals(t, expectedStats, actualStats.HugetlbStats[pageSize])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHugetlbStatsNoUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
maxUsage: hugetlbMaxUsageContents,
|
||||
})
|
||||
|
||||
hugetlb := &HugetlbGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHugetlbStatsNoMaxUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
for _, pageSize := range HugePageSizes {
|
||||
helper.writeFileContents(map[string]string{
|
||||
fmt.Sprintf(usage, pageSize): hugetlbUsageContents,
|
||||
})
|
||||
}
|
||||
|
||||
hugetlb := &HugetlbGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHugetlbStatsBadUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
for _, pageSize := range HugePageSizes {
|
||||
helper.writeFileContents(map[string]string{
|
||||
fmt.Sprintf(usage, pageSize): "bad",
|
||||
maxUsage: hugetlbMaxUsageContents,
|
||||
})
|
||||
}
|
||||
|
||||
hugetlb := &HugetlbGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHugetlbStatsBadMaxUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
usage: hugetlbUsageContents,
|
||||
maxUsage: "bad",
|
||||
})
|
||||
|
||||
hugetlb := &HugetlbGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
|
@@ -86,14 +86,14 @@ func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
			return err
		}
	}
	if cgroup.Resources.MemorySwappiness >= 0 && cgroup.Resources.MemorySwappiness <= 100 {
		if err := writeFile(path, "memory.swappiness", strconv.FormatInt(cgroup.Resources.MemorySwappiness, 10)); err != nil {
	if cgroup.Resources.MemorySwappiness == nil || int64(*cgroup.Resources.MemorySwappiness) == -1 {
		return nil
	} else if int64(*cgroup.Resources.MemorySwappiness) >= 0 && int64(*cgroup.Resources.MemorySwappiness) <= 100 {
		if err := writeFile(path, "memory.swappiness", strconv.FormatInt(*cgroup.Resources.MemorySwappiness, 10)); err != nil {
			return err
		}
	} else if cgroup.Resources.MemorySwappiness == -1 {
		return nil
	} else {
		return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", cgroup.Resources.MemorySwappiness)
		return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", int64(*cgroup.Resources.MemorySwappiness))
	}

	return nil
@@ -149,7 +149,7 @@ func memoryAssigned(cgroup *configs.Cgroup) bool {
		cgroup.Resources.MemorySwap > 0 ||
		cgroup.Resources.KernelMemory > 0 ||
		cgroup.Resources.OomKillDisable ||
		cgroup.Resources.MemorySwappiness != -1
		(cgroup.Resources.MemorySwappiness != nil && *cgroup.Resources.MemorySwappiness != -1)
}

func getMemoryData(path, name string) (cgroups.MemoryData, error) {
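
With MemorySwappiness changed to a pointer in the hunk above, a nil value now means "leave memory.swappiness at the kernel default", an explicit 0-100 value is written out, and -1 keeps its old "do not set" meaning. A minimal sketch of how a caller might populate the field is shown below; the pointer's integer type is assumed to be int64 here to match the FormatInt call, and the helper name is hypothetical.

package sketch

import "github.com/opencontainers/runc/libcontainer/configs"

// resourcesWithSwappiness is a hypothetical helper, for illustration only:
// it returns a Resources value that either pins memory.swappiness or leaves
// the kernel default untouched.
func resourcesWithSwappiness(value int64, set bool) *configs.Resources {
	r := &configs.Resources{}
	if set {
		// An explicit 0-100 value gets written to memory.swappiness by Set().
		r.MemorySwappiness = &value
	}
	// A nil pointer keeps Set() from touching memory.swappiness at all.
	return r
}
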
@ -1,339 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
const (
|
||||
memoryStatContents = `cache 512
|
||||
rss 1024`
|
||||
memoryUsageContents = "2048\n"
|
||||
memoryMaxUsageContents = "4096\n"
|
||||
memoryFailcnt = "100\n"
|
||||
memoryLimitContents = "8192\n"
|
||||
)
|
||||
|
||||
func TestMemorySetMemory(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
memoryBefore = 314572800 // 300M
|
||||
memoryAfter = 524288000 // 500M
|
||||
reservationBefore = 209715200 // 200M
|
||||
reservationAfter = 314572800 // 300M
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.limit_in_bytes": strconv.Itoa(memoryBefore),
|
||||
"memory.soft_limit_in_bytes": strconv.Itoa(reservationBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.config.Resources.Memory = memoryAfter
|
||||
helper.CgroupData.config.Resources.MemoryReservation = reservationAfter
|
||||
memory := &MemoryGroup{}
|
||||
if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "memory.limit_in_bytes")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.limit_in_bytes - %s", err)
|
||||
}
|
||||
if value != memoryAfter {
|
||||
t.Fatal("Got the wrong value, set memory.limit_in_bytes failed.")
|
||||
}
|
||||
|
||||
value, err = getCgroupParamUint(helper.CgroupPath, "memory.soft_limit_in_bytes")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.soft_limit_in_bytes - %s", err)
|
||||
}
|
||||
if value != reservationAfter {
|
||||
t.Fatal("Got the wrong value, set memory.soft_limit_in_bytes failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemorySetMemoryswap(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
memoryswapBefore = 314572800 // 300M
|
||||
memoryswapAfter = 524288000 // 500M
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.memsw.limit_in_bytes": strconv.Itoa(memoryswapBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.config.Resources.MemorySwap = memoryswapAfter
|
||||
memory := &MemoryGroup{}
|
||||
if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "memory.memsw.limit_in_bytes")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.memsw.limit_in_bytes - %s", err)
|
||||
}
|
||||
if value != memoryswapAfter {
|
||||
t.Fatal("Got the wrong value, set memory.memsw.limit_in_bytes failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemorySetKernelMemory(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
kernelMemoryBefore = 314572800 // 300M
|
||||
kernelMemoryAfter = 524288000 // 500M
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.kmem.limit_in_bytes": strconv.Itoa(kernelMemoryBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.config.Resources.KernelMemory = kernelMemoryAfter
|
||||
memory := &MemoryGroup{}
|
||||
if err := memory.SetKernelMemory(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "memory.kmem.limit_in_bytes")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.kmem.limit_in_bytes - %s", err)
|
||||
}
|
||||
if value != kernelMemoryAfter {
|
||||
t.Fatal("Got the wrong value, set memory.kmem.limit_in_bytes failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemorySetMemorySwappinessDefault(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
swappinessBefore = 60 //deafult is 60
|
||||
swappinessAfter = 0
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.swappiness": strconv.Itoa(swappinessBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.config.Resources.Memory = swappinessAfter
|
||||
memory := &MemoryGroup{}
|
||||
if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "memory.swappiness")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.swappiness - %s", err)
|
||||
}
|
||||
if value != swappinessAfter {
|
||||
t.Fatal("Got the wrong value, set memory.swappiness failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.limit_in_bytes": memoryLimitContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.failcnt": memoryFailcnt,
|
||||
"memory.memsw.usage_in_bytes": memoryUsageContents,
|
||||
"memory.memsw.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.memsw.failcnt": memoryFailcnt,
|
||||
"memory.memsw.limit_in_bytes": memoryLimitContents,
|
||||
"memory.kmem.usage_in_bytes": memoryUsageContents,
|
||||
"memory.kmem.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.kmem.failcnt": memoryFailcnt,
|
||||
"memory.kmem.limit_in_bytes": memoryLimitContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expectedStats := cgroups.MemoryStats{Cache: 512, Usage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192}, SwapUsage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192}, KernelUsage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192}, Stats: map[string]uint64{"cache": 512, "rss": 1024}}
|
||||
expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats)
|
||||
}
|
||||
|
||||
func TestMemoryStatsNoStatFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.limit_in_bytes": memoryLimitContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsNoUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.limit_in_bytes": memoryLimitContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsNoMaxUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.limit_in_bytes": memoryLimitContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsNoLimitInBytesFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsBadStatFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": "rss rss",
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.limit_in_bytes": memoryLimitContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsBadUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": "bad",
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.limit_in_bytes": memoryLimitContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsBadMaxUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": "bad",
|
||||
"memory.limit_in_bytes": memoryLimitContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsBadLimitInBytesFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.limit_in_bytes": "bad",
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemorySetOomControl(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
oom_kill_disable = 1 // disable oom killer, default is 0
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.oom_control": strconv.Itoa(oom_kill_disable),
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "memory.oom_control")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.oom_control - %s", err)
|
||||
}
|
||||
|
||||
if value != oom_kill_disable {
|
||||
t.Fatalf("Got the wrong value, set memory.oom_control failed.")
|
||||
}
|
||||
}
|
|
@@ -9,6 +9,7 @@ import (

type NameGroup struct {
	GroupName string
	Join bool
}

func (s *NameGroup) Name() string {
@@ -16,6 +17,10 @@ func (s *NameGroup) Name() string {
}

func (s *NameGroup) Apply(d *cgroupData) error {
	if s.Join {
		// ignore errors if the named cgroup does not exist
		d.join(s.GroupName)
	}
	return nil
}

@@ -24,6 +29,9 @@ func (s *NameGroup) Set(path string, cgroup *configs.Cgroup) error {
}

func (s *NameGroup) Remove(d *cgroupData) error {
	if s.Join {
		removePath(d.path(s.GroupName))
	}
	return nil
}

@@ -1,38 +0,0 @@
// +build linux

package fs

import (
	"testing"
)

const (
	classidBefore = "0x100002"
	classidAfter = "0x100001"
)

func TestNetClsSetClassid(t *testing.T) {
	helper := NewCgroupTestUtil("net_cls", t)
	defer helper.cleanup()

	helper.writeFileContents(map[string]string{
		"net_cls.classid": classidBefore,
	})

	helper.CgroupData.config.Resources.NetClsClassid = classidAfter
	netcls := &NetClsGroup{}
	if err := netcls.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	// As we are in mock environment, we can't get correct value of classid from
	// net_cls.classid.
	// So. we just judge if we successfully write classid into file
	value, err := getCgroupParamString(helper.CgroupPath, "net_cls.classid")
	if err != nil {
		t.Fatalf("Failed to parse net_cls.classid - %s", err)
	}
	if value != classidAfter {
		t.Fatal("Got the wrong value, set net_cls.classid failed.")
	}
}

@@ -1,38 +0,0 @@
// +build linux

package fs

import (
	"strings"
	"testing"

	"github.com/opencontainers/runc/libcontainer/configs"
)

var (
	prioMap = []*configs.IfPrioMap{
		{
			Interface: "test",
			Priority: 5,
		},
	}
)

func TestNetPrioSetIfPrio(t *testing.T) {
	helper := NewCgroupTestUtil("net_prio", t)
	defer helper.cleanup()

	helper.CgroupData.config.Resources.NetPrioIfpriomap = prioMap
	netPrio := &NetPrioGroup{}
	if err := netPrio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "net_prio.ifpriomap")
	if err != nil {
		t.Fatalf("Failed to parse net_prio.ifpriomap - %s", err)
	}
	if !strings.Contains(value, "test 5") {
		t.Fatal("Got the wrong value, set net_prio.ifpriomap failed.")
	}
}

@@ -1,83 +0,0 @@
// +build linux

package fs

import (
	"strconv"
	"testing"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

const (
	maxUnlimited = -1
	maxLimited = 1024
)

func TestPidsSetMax(t *testing.T) {
	helper := NewCgroupTestUtil("pids", t)
	defer helper.cleanup()

	helper.writeFileContents(map[string]string{
		"pids.max": "max",
	})

	helper.CgroupData.config.Resources.PidsLimit = maxLimited
	pids := &PidsGroup{}
	if err := pids.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamUint(helper.CgroupPath, "pids.max")
	if err != nil {
		t.Fatalf("Failed to parse pids.max - %s", err)
	}

	if value != maxLimited {
		t.Fatalf("Expected %d, got %d for setting pids.max - limited", maxLimited, value)
	}
}

func TestPidsSetUnlimited(t *testing.T) {
	helper := NewCgroupTestUtil("pids", t)
	defer helper.cleanup()

	helper.writeFileContents(map[string]string{
		"pids.max": strconv.Itoa(maxLimited),
	})

	helper.CgroupData.config.Resources.PidsLimit = maxUnlimited
	pids := &PidsGroup{}
	if err := pids.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "pids.max")
	if err != nil {
		t.Fatalf("Failed to parse pids.max - %s", err)
	}

	if value != "max" {
		t.Fatalf("Expected %s, got %s for setting pids.max - unlimited", "max", value)
	}
}

func TestPidsStats(t *testing.T) {
	helper := NewCgroupTestUtil("pids", t)
	defer helper.cleanup()

	helper.writeFileContents(map[string]string{
		"pids.current": strconv.Itoa(1337),
		"pids.max": strconv.Itoa(maxLimited),
	})

	pids := &PidsGroup{}
	stats := *cgroups.NewStats()
	if err := pids.GetStats(helper.CgroupPath, &stats); err != nil {
		t.Fatal(err)
	}

	if stats.PidsStats.Current != 1337 {
		t.Fatalf("Expected %d, got %d for pids.current", 1337, stats.PidsStats.Current)
	}
}

@ -1,117 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error {
|
||||
if len(expected) != len(actual) {
|
||||
return fmt.Errorf("blkioStatEntries length do not match")
|
||||
}
|
||||
for i, expValue := range expected {
|
||||
actValue := actual[i]
|
||||
if expValue != actValue {
|
||||
return fmt.Errorf("Expected blkio stat entry %v but found %v", expValue, actValue)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) {
|
||||
if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil {
|
||||
logrus.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil {
|
||||
logrus.Printf("blkio IoServicedRecursive do not match - %s\n", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil {
|
||||
logrus.Printf("blkio IoQueuedRecursive do not match - %s\n", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil {
|
||||
logrus.Printf("blkio SectorsRecursive do not match - %s\n", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if err := blkioStatEntryEquals(expected.IoServiceTimeRecursive, actual.IoServiceTimeRecursive); err != nil {
|
||||
logrus.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if err := blkioStatEntryEquals(expected.IoWaitTimeRecursive, actual.IoWaitTimeRecursive); err != nil {
|
||||
logrus.Printf("blkio IoWaitTimeRecursive do not match - %s\n", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if err := blkioStatEntryEquals(expected.IoMergedRecursive, actual.IoMergedRecursive); err != nil {
|
||||
logrus.Printf("blkio IoMergedRecursive do not match - %v vs %v\n", expected.IoMergedRecursive, actual.IoMergedRecursive)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if err := blkioStatEntryEquals(expected.IoTimeRecursive, actual.IoTimeRecursive); err != nil {
|
||||
logrus.Printf("blkio IoTimeRecursive do not match - %s\n", err)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) {
|
||||
if expected != actual {
|
||||
logrus.Printf("Expected throttling data %v but found %v\n", expected, actual)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func expectHugetlbStatEquals(t *testing.T, expected, actual cgroups.HugetlbStats) {
|
||||
if expected != actual {
|
||||
logrus.Printf("Expected hugetlb stats %v but found %v\n", expected, actual)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) {
|
||||
expectMemoryDataEquals(t, expected.Usage, actual.Usage)
|
||||
expectMemoryDataEquals(t, expected.SwapUsage, actual.SwapUsage)
|
||||
expectMemoryDataEquals(t, expected.KernelUsage, actual.KernelUsage)
|
||||
|
||||
for key, expValue := range expected.Stats {
|
||||
actValue, ok := actual.Stats[key]
|
||||
if !ok {
|
||||
logrus.Printf("Expected memory stat key %s not found\n", key)
|
||||
t.Fail()
|
||||
}
|
||||
if expValue != actValue {
|
||||
logrus.Printf("Expected memory stat value %d but found %d\n", expValue, actValue)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func expectMemoryDataEquals(t *testing.T, expected, actual cgroups.MemoryData) {
|
||||
if expected.Usage != actual.Usage {
|
||||
logrus.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage)
|
||||
t.Fail()
|
||||
}
|
||||
if expected.MaxUsage != actual.MaxUsage {
|
||||
logrus.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage)
|
||||
t.Fail()
|
||||
}
|
||||
if expected.Failcnt != actual.Failcnt {
|
||||
logrus.Printf("Expected memory failcnt %d but found %d\n", expected.Failcnt, actual.Failcnt)
|
||||
t.Fail()
|
||||
}
|
||||
if expected.Limit != actual.Limit {
|
||||
logrus.Printf("Expected memory limit %d but found %d\n", expected.Limit, actual.Limit)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
|
@@ -1,67 +0,0 @@
// +build linux

/*
Utility for testing cgroup operations.

Creates a mock of the cgroup filesystem for the duration of the test.
*/
package fs

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/opencontainers/runc/libcontainer/configs"
)

type cgroupTestUtil struct {
	// cgroup data to use in tests.
	CgroupData *cgroupData

	// Path to the mock cgroup directory.
	CgroupPath string

	// Temporary directory to store mock cgroup filesystem.
	tempDir string
	t *testing.T
}

// Creates a new test util for the specified subsystem
func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil {
	d := &cgroupData{
		config: &configs.Cgroup{},
	}
	d.config.Resources = &configs.Resources{}
	tempDir, err := ioutil.TempDir("", "cgroup_test")
	if err != nil {
		t.Fatal(err)
	}
	d.root = tempDir
	testCgroupPath := filepath.Join(d.root, subsystem)
	if err != nil {
		t.Fatal(err)
	}

	// Ensure the full mock cgroup path exists.
	err = os.MkdirAll(testCgroupPath, 0755)
	if err != nil {
		t.Fatal(err)
	}
	return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t}
}

func (c *cgroupTestUtil) cleanup() {
	os.RemoveAll(c.tempDir)
}

// Write the specified contents on the mock of the specified cgroup files.
func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) {
	for file, contents := range fileContents {
		err := writeFile(c.CgroupPath, file, contents)
		if err != nil {
			c.t.Fatal(err)
		}
	}
}

@ -1,97 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
cgroupFile = "cgroup.file"
|
||||
floatValue = 2048.0
|
||||
floatString = "2048"
|
||||
)
|
||||
|
||||
func TestGetCgroupParamsInt(t *testing.T) {
|
||||
// Setup tempdir.
|
||||
tempDir, err := ioutil.TempDir("", "cgroup_utils_test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
tempFile := filepath.Join(tempDir, cgroupFile)
|
||||
|
||||
// Success.
|
||||
err = ioutil.WriteFile(tempFile, []byte(floatString), 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
value, err := getCgroupParamUint(tempDir, cgroupFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if value != floatValue {
|
||||
t.Fatalf("Expected %d to equal %f", value, floatValue)
|
||||
}
|
||||
|
||||
// Success with new line.
|
||||
err = ioutil.WriteFile(tempFile, []byte(floatString+"\n"), 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
value, err = getCgroupParamUint(tempDir, cgroupFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if value != floatValue {
|
||||
t.Fatalf("Expected %d to equal %f", value, floatValue)
|
||||
}
|
||||
|
||||
// Success with negative values
|
||||
err = ioutil.WriteFile(tempFile, []byte("-12345"), 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
value, err = getCgroupParamUint(tempDir, cgroupFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if value != 0 {
|
||||
t.Fatalf("Expected %d to equal %d", value, 0)
|
||||
}
|
||||
|
||||
// Success with negative values lesser than min int64
|
||||
s := strconv.FormatFloat(math.MinInt64, 'f', -1, 64)
|
||||
err = ioutil.WriteFile(tempFile, []byte(s), 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
value, err = getCgroupParamUint(tempDir, cgroupFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if value != 0 {
|
||||
t.Fatalf("Expected %d to equal %d", value, 0)
|
||||
}
|
||||
|
||||
// Not a float.
|
||||
err = ioutil.WriteFile(tempFile, []byte("not-a-float"), 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = getCgroupParamUint(tempDir, cgroupFile)
|
||||
if err == nil {
|
||||
t.Fatal("Expecting error, got none")
|
||||
}
|
||||
|
||||
// Unknown file.
|
||||
err = os.Remove(tempFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = getCgroupParamUint(tempDir, cgroupFile)
|
||||
if err == nil {
|
||||
t.Fatal("Expecting error, got none")
|
||||
}
|
||||
}
|
|
@@ -46,7 +46,7 @@ type MemoryStats struct {
	Usage MemoryData `json:"usage,omitempty"`
	// usage of memory + swap
	SwapUsage MemoryData `json:"swap_usage,omitempty"`
	// usafe of kernel memory
	// usage of kernel memory
	KernelUsage MemoryData `json:"kernel_usage,omitempty"`
	Stats map[string]uint64 `json:"stats,omitempty"`
}

@@ -80,7 +80,7 @@ type HugetlbStats struct {
	Usage uint64 `json:"usage,omitempty"`
	// maximum usage ever recorded.
	MaxUsage uint64 `json:"max_usage,omitempty"`
	// number of times htgetlb usage allocation failure.
	// number of times hugetlb usage allocation failure.
	Failcnt uint64 `json:"failcnt"`
}

@@ -126,11 +126,11 @@ func getCgroupMountsHelper(ss map[string]bool, mi io.Reader) ([]Mount, error) {
	scanner := bufio.NewScanner(mi)
	for scanner.Scan() {
		txt := scanner.Text()
		sepIdx := strings.IndexByte(txt, '-')
		sepIdx := strings.Index(txt, " - ")
		if sepIdx == -1 {
			return nil, fmt.Errorf("invalid mountinfo format")
		}
		if txt[sepIdx+2:sepIdx+8] != "cgroup" {
		if txt[sepIdx+3:sepIdx+9] != "cgroup" {
			continue
		}
		fields := strings.Split(txt, " ")
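
The separator change above is the substantive fix: in /proc/self/mountinfo the optional fields are terminated by a standalone " - ", and a bare '-' can legitimately appear earlier in the line (for example in a device-mapper name), so IndexByte could split at the wrong place. A minimal sketch of reading the filesystem type with the same " - " convention, assuming a hypothetical helper name:

package sketch

import (
	"fmt"
	"strings"
)

// fsTypeOf returns the filesystem type field of one mountinfo line by
// splitting on the " - " separator that ends the optional fields.
// Sketch only; the real getCgroupMountsHelper does more than this.
func fsTypeOf(line string) (string, error) {
	sep := strings.Index(line, " - ")
	if sep == -1 {
		return "", fmt.Errorf("invalid mountinfo format: %q", line)
	}
	// Everything after " - " is: fstype, mount source, super options.
	post := strings.Fields(line[sep+3:])
	if len(post) == 0 {
		return "", fmt.Errorf("invalid mountinfo format: %q", line)
	}
	return post[0], nil
}
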
@ -1,138 +0,0 @@
|
|||
package cgroups
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
|
||||
16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
|
||||
17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755
|
||||
18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
|
||||
19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw
|
||||
20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
|
||||
21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
|
||||
22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755
|
||||
23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755
|
||||
24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
|
||||
25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
|
||||
26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children
|
||||
27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children
|
||||
28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children
|
||||
29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children
|
||||
30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children
|
||||
31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children
|
||||
32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children
|
||||
33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children
|
||||
34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children
|
||||
35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered
|
||||
36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
|
||||
37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel
|
||||
38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel
|
||||
39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel
|
||||
40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw
|
||||
41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw
|
||||
42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw
|
||||
43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw
|
||||
45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered
|
||||
46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered
|
||||
47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered
|
||||
48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered
|
||||
121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
|
||||
124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw
|
||||
165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered
|
||||
167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered
|
||||
171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered
|
||||
175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered
|
||||
179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered
|
||||
183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered
|
||||
187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered
|
||||
191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered
|
||||
195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered
|
||||
199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered
|
||||
203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered
|
||||
207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered
|
||||
211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered
|
||||
215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered
|
||||
219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered
|
||||
223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered
|
||||
227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered
|
||||
231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered
|
||||
235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered
|
||||
239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered
|
||||
243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered
|
||||
247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered
|
||||
31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1`
|
||||
|
||||
func TestGetCgroupMounts(t *testing.T) {
|
||||
subsystems := map[string]bool{
|
||||
"cpuset": true,
|
||||
"cpu": true,
|
||||
"cpuacct": true,
|
||||
"memory": true,
|
||||
"devices": true,
|
||||
"freezer": true,
|
||||
"net_cls": true,
|
||||
"blkio": true,
|
||||
"perf_event": true,
|
||||
"hugetlb": true,
|
||||
}
|
||||
mi := bytes.NewBufferString(fedoraMountinfo)
|
||||
cgMounts, err := getCgroupMountsHelper(subsystems, mi)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cgMap := make(map[string]Mount)
|
||||
for _, m := range cgMounts {
|
||||
for _, ss := range m.Subsystems {
|
||||
cgMap[ss] = m
|
||||
}
|
||||
}
|
||||
for ss := range subsystems {
|
||||
m, ok := cgMap[ss]
|
||||
if !ok {
|
||||
t.Fatalf("%s not found", ss)
|
||||
}
|
||||
if m.Root != "/" {
|
||||
t.Fatalf("unexpected root for %s: %s", ss, m.Root)
|
||||
}
|
||||
if !strings.HasPrefix(m.Mountpoint, "/sys/fs/cgroup/") && !strings.Contains(m.Mountpoint, ss) {
|
||||
t.Fatalf("unexpected mountpoint for %s: %s", ss, m.Mountpoint)
|
||||
}
|
||||
var ssFound bool
|
||||
for _, mss := range m.Subsystems {
|
||||
if mss == ss {
|
||||
ssFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ssFound {
|
||||
t.Fatalf("subsystem %s not found in Subsystems field %v", ss, m.Subsystems)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkGetCgroupMounts(b *testing.B) {
|
||||
subsystems := map[string]bool{
|
||||
"cpuset": true,
|
||||
"cpu": true,
|
||||
"cpuacct": true,
|
||||
"memory": true,
|
||||
"devices": true,
|
||||
"freezer": true,
|
||||
"net_cls": true,
|
||||
"blkio": true,
|
||||
"perf_event": true,
|
||||
"hugetlb": true,
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
b.StopTimer()
|
||||
mi := bytes.NewBufferString(fedoraMountinfo)
|
||||
b.StartTimer()
|
||||
if _, err := getCgroupMountsHelper(subsystems, mi); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -111,7 +111,7 @@ type Resources struct {
|
|||
OomKillDisable bool `json:"oom_kill_disable"`
|
||||
|
||||
// Tuning swappiness behaviour per cgroup
|
||||
MemorySwappiness int64 `json:"memory_swappiness"`
|
||||
MemorySwappiness *int64 `json:"memory_swappiness"`
|
||||
|
||||
// Set priority of network traffic for container
|
||||
NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"`
|
||||
|
|
|
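The switch from int64 to *int64 above is about distinguishing "unset" from an explicit 0; a tiny illustrative sketch (the struct is reduced to the one field under discussion) of that distinction:

package main

import "fmt"

// resources reduces the vendored struct to the one field being changed.
type resources struct {
	MemorySwappiness *int64 `json:"memory_swappiness"`
}

func main() {
	var unset resources
	zero := int64(0)
	explicit := resources{MemorySwappiness: &zero}

	fmt.Println(unset.MemorySwappiness == nil)   // true: field omitted, keep the kernel default
	fmt.Println(*explicit.MemorySwappiness == 0) // true: swappiness explicitly set to 0
}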
@@ -128,11 +128,11 @@ type Config struct {
|
|||
|
||||
// AppArmorProfile specifies the profile to apply to the process running in the container and is
|
||||
// changed at the time the process is execed
|
||||
AppArmorProfile string `json:"apparmor_profile"`
|
||||
AppArmorProfile string `json:"apparmor_profile,omitempty"`
|
||||
|
||||
// ProcessLabel specifies the label to apply to the process running in the container. It is
|
||||
// commonly used by selinux
|
||||
ProcessLabel string `json:"process_label"`
|
||||
ProcessLabel string `json:"process_label,omitempty"`
|
||||
|
||||
// Rlimits specifies the resource limits, such as max open files, to set in the container
|
||||
// If Rlimits are not set, the container will inherit rlimits from the parent process
|
||||
|
@@ -171,12 +171,18 @@ type Config struct {
|
|||
// A default action to be taken if no rules match is also given.
|
||||
Seccomp *Seccomp `json:"seccomp"`
|
||||
|
||||
// NoNewPrivileges controls whether processes in the container can gain additional privileges.
|
||||
NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
|
||||
|
||||
// Hooks are a collection of actions to perform at various container lifecycle events.
|
||||
// Hooks cannot be marshaled to JSON, but they do not need to be.
|
||||
Hooks *Hooks `json:"-"`
|
||||
|
||||
// Version is the version of opencontainer specification that is supported.
|
||||
Version string `json:"version"`
|
||||
|
||||
// Labels are user defined metadata that is stored in the config and populated on the state
|
||||
Labels []string `json:"labels"`
|
||||
}
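For background on the new NoNewPrivileges field: on Linux it ultimately corresponds to prctl(PR_SET_NO_NEW_PRIVS). A hedged, dependency-free sketch using a raw syscall (the constant 38 comes from linux/prctl.h; this is not the library's own code path):

package main

import (
	"fmt"
	"syscall"
)

// prSetNoNewPrivs is PR_SET_NO_NEW_PRIVS from linux/prctl.h.
const prSetNoNewPrivs = 38

func setNoNewPrivileges() error {
	// prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
	if _, _, errno := syscall.RawSyscall6(syscall.SYS_PRCTL, prSetNoNewPrivs, 1, 0, 0, 0, 0); errno != 0 {
		return errno
	}
	return nil
}

func main() {
	if err := setNoNewPrivileges(); err != nil {
		fmt.Println("prctl failed:", err)
		return
	}
	fmt.Println("no_new_privs set for this process")
}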
|
||||
|
||||
type Hooks struct {
|
||||
|
|
|
@@ -1,156 +0,0 @@
|
|||
// +build linux freebsd
|
||||
|
||||
package configs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Checks whether the expected capability is specified in the capabilities.
|
||||
func contains(expected string, values []string) bool {
|
||||
for _, v := range values {
|
||||
if v == expected {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func containsDevice(expected *Device, values []*Device) bool {
|
||||
for _, d := range values {
|
||||
if d.Path == expected.Path &&
|
||||
d.Permissions == expected.Permissions &&
|
||||
d.FileMode == expected.FileMode &&
|
||||
d.Major == expected.Major &&
|
||||
d.Minor == expected.Minor &&
|
||||
d.Type == expected.Type {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func loadConfig(name string) (*Config, error) {
|
||||
f, err := os.Open(filepath.Join("../sample_configs", name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var container *Config
|
||||
if err := json.NewDecoder(f).Decode(&container); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check that a config doesn't contain extra fields
|
||||
var configMap, abstractMap map[string]interface{}
|
||||
|
||||
if _, err := f.Seek(0, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(f).Decode(&abstractMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
configData, err := json.Marshal(&container)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(configData, &configMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for k := range configMap {
|
||||
delete(abstractMap, k)
|
||||
}
|
||||
|
||||
if len(abstractMap) != 0 {
|
||||
return nil, fmt.Errorf("unknown fields: %s", abstractMap)
|
||||
}
|
||||
|
||||
return container, nil
|
||||
}
|
||||
|
||||
func TestRemoveNamespace(t *testing.T) {
|
||||
ns := Namespaces{
|
||||
{Type: NEWNET},
|
||||
}
|
||||
if !ns.Remove(NEWNET) {
|
||||
t.Fatal("NEWNET was not removed")
|
||||
}
|
||||
if len(ns) != 0 {
|
||||
t.Fatalf("namespaces should have 0 items but reports %d", len(ns))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostUIDNoUSERNS(t *testing.T) {
|
||||
config := &Config{
|
||||
Namespaces: Namespaces{},
|
||||
}
|
||||
uid, err := config.HostUID()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if uid != 0 {
|
||||
t.Fatalf("expected uid 0 with no USERNS but received %d", uid)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostUIDWithUSERNS(t *testing.T) {
|
||||
config := &Config{
|
||||
Namespaces: Namespaces{{Type: NEWUSER}},
|
||||
UidMappings: []IDMap{
|
||||
{
|
||||
ContainerID: 0,
|
||||
HostID: 1000,
|
||||
Size: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
uid, err := config.HostUID()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if uid != 1000 {
|
||||
t.Fatalf("expected uid 1000 with no USERNS but received %d", uid)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostGIDNoUSERNS(t *testing.T) {
|
||||
config := &Config{
|
||||
Namespaces: Namespaces{},
|
||||
}
|
||||
uid, err := config.HostGID()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if uid != 0 {
|
||||
t.Fatalf("expected gid 0 with no USERNS but received %d", uid)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostGIDWithUSERNS(t *testing.T) {
|
||||
config := &Config{
|
||||
Namespaces: Namespaces{{Type: NEWUSER}},
|
||||
GidMappings: []IDMap{
|
||||
{
|
||||
ContainerID: 0,
|
||||
HostID: 1000,
|
||||
Size: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
uid, err := config.HostGID()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if uid != 1000 {
|
||||
t.Fatalf("expected gid 1000 with no USERNS but received %d", uid)
|
||||
}
|
||||
}
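The tests above exercise the container-to-host ID translation; a standalone sketch of that mapping rule (hostID and idMap are illustrative names, not the package's API):

package main

import (
	"errors"
	"fmt"
)

type idMap struct{ ContainerID, HostID, Size int }

// hostID resolves a container-side ID to a host-side ID: with no mappings
// (no user namespace) IDs pass through unchanged, otherwise the first mapping
// whose range covers the ID applies.
func hostID(containerID int, maps []idMap) (int, error) {
	if len(maps) == 0 {
		return containerID, nil // no userns: identity mapping
	}
	for _, m := range maps {
		if containerID >= m.ContainerID && containerID < m.ContainerID+m.Size {
			return m.HostID + (containerID - m.ContainerID), nil
		}
	}
	return -1, errors.New("container ID not covered by any mapping")
}

func main() {
	fmt.Println(hostID(0, nil))                         // 0 <nil>
	fmt.Println(hostID(0, []idMap{{0, 1000, 1}}))       // 1000 <nil>
	fmt.Println(hostID(5, []idMap{{0, 100000, 65536}})) // 100005 <nil>
}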
|
|
@@ -1,3 +0,0 @@
|
|||
package configs
|
||||
|
||||
// All current tests are for Unix-specific functionality
|
|
@@ -2,7 +2,11 @@
|
|||
|
||||
package configs
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
NEWNET NamespaceType = "NEWNET"
|
||||
|
@@ -13,6 +17,51 @@ const (
|
|||
NEWUSER NamespaceType = "NEWUSER"
|
||||
)
|
||||
|
||||
var (
|
||||
nsLock sync.Mutex
|
||||
supportedNamespaces = make(map[NamespaceType]bool)
|
||||
)
|
||||
|
||||
// nsToFile converts the namespace type to its filename
|
||||
func nsToFile(ns NamespaceType) string {
|
||||
switch ns {
|
||||
case NEWNET:
|
||||
return "net"
|
||||
case NEWNS:
|
||||
return "mnt"
|
||||
case NEWPID:
|
||||
return "pid"
|
||||
case NEWIPC:
|
||||
return "ipc"
|
||||
case NEWUSER:
|
||||
return "user"
|
||||
case NEWUTS:
|
||||
return "uts"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsNamespaceSupported returns whether a namespace is available or
|
||||
// not
|
||||
func IsNamespaceSupported(ns NamespaceType) bool {
|
||||
nsLock.Lock()
|
||||
defer nsLock.Unlock()
|
||||
supported, ok := supportedNamespaces[ns]
|
||||
if ok {
|
||||
return supported
|
||||
}
|
||||
nsFile := nsToFile(ns)
|
||||
// if the namespace type is unknown, just return false
|
||||
if nsFile == "" {
|
||||
return false
|
||||
}
|
||||
_, err := os.Stat(fmt.Sprintf("/proc/self/ns/%s", nsFile))
|
||||
// a namespace is supported if it exists and we have permissions to read it
|
||||
supported = err == nil
|
||||
supportedNamespaces[ns] = supported
|
||||
return supported
|
||||
}
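A standalone sketch of the probe IsNamespaceSupported performs above: a namespace type counts as supported when its /proc/self/ns entry can be stat'd:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Same filenames nsToFile maps to; the probe is just a stat on /proc/self/ns.
	for _, name := range []string{"net", "mnt", "pid", "ipc", "user", "uts"} {
		_, err := os.Stat("/proc/self/ns/" + name)
		fmt.Printf("%-4s supported: %v\n", name, err == nil)
	}
}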
|
||||
|
||||
func NamespaceTypes() []NamespaceType {
|
||||
return []NamespaceType{
|
||||
NEWNET,
|
||||
|
@@ -35,26 +84,7 @@ func (n *Namespace) GetPath(pid int) string {
|
|||
if n.Path != "" {
|
||||
return n.Path
|
||||
}
|
||||
return fmt.Sprintf("/proc/%d/ns/%s", pid, n.file())
|
||||
}
|
||||
|
||||
func (n *Namespace) file() string {
|
||||
file := ""
|
||||
switch n.Type {
|
||||
case NEWNET:
|
||||
file = "net"
|
||||
case NEWNS:
|
||||
file = "mnt"
|
||||
case NEWPID:
|
||||
file = "pid"
|
||||
case NEWIPC:
|
||||
file = "ipc"
|
||||
case NEWUSER:
|
||||
file = "user"
|
||||
case NEWUTS:
|
||||
file = "uts"
|
||||
}
|
||||
return file
|
||||
return fmt.Sprintf("/proc/%d/ns/%s", pid, nsToFile(n.Type))
|
||||
}
|
||||
|
||||
func (n *Namespaces) Remove(t NamespaceType) bool {
|
||||
|
@@ -87,3 +117,11 @@ func (n *Namespaces) index(t NamespaceType) int {
|
|||
func (n *Namespaces) Contains(t NamespaceType) bool {
|
||||
return n.index(t) != -1
|
||||
}
|
||||
|
||||
func (n *Namespaces) PathOf(t NamespaceType) string {
|
||||
i := n.index(t)
|
||||
if i == -1 {
|
||||
return ""
|
||||
}
|
||||
return (*n)[i].Path
|
||||
}
|
||||
|
|
|
@@ -4,6 +4,7 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
@@ -35,6 +36,9 @@ func (v *ConfigValidator) Validate(config *configs.Config) error {
|
|||
if err := v.usernamespace(config); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := v.sysctl(config); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -91,3 +95,44 @@ func (v *ConfigValidator) usernamespace(config *configs.Config) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sysctl validates whether the specified sysctl keys are valid.
|
||||
// /proc/sys isn't completely namespaced and depending on which namespaces
|
||||
// are specified, a subset of sysctls are permitted.
|
||||
func (v *ConfigValidator) sysctl(config *configs.Config) error {
|
||||
validSysctlPrefixes := []string{}
|
||||
validSysctlMap := make(map[string]bool)
|
||||
if config.Namespaces.Contains(configs.NEWNET) {
|
||||
validSysctlPrefixes = append(validSysctlPrefixes, "net.")
|
||||
}
|
||||
if config.Namespaces.Contains(configs.NEWIPC) {
|
||||
validSysctlPrefixes = append(validSysctlPrefixes, "fs.mqueue.")
|
||||
validSysctlMap = map[string]bool{
|
||||
"kernel.msgmax": true,
|
||||
"kernel.msgmnb": true,
|
||||
"kernel.msgmni": true,
|
||||
"kernel.sem": true,
|
||||
"kernel.shmall": true,
|
||||
"kernel.shmmax": true,
|
||||
"kernel.shmmni": true,
|
||||
"kernel.shm_rmid_forced": true,
|
||||
}
|
||||
}
|
||||
for s := range config.Sysctl {
|
||||
if validSysctlMap[s] {
|
||||
continue
|
||||
}
|
||||
valid := false
|
||||
for _, vp := range validSysctlPrefixes {
|
||||
if strings.HasPrefix(s, vp) {
|
||||
valid = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !valid {
|
||||
return fmt.Errorf("sysctl %q is not permitted in the config", s)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
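The validation above boils down to an allow-list plus allowed prefixes keyed on which namespaces are present; a compact standalone sketch (sysctlAllowed is an illustrative name, not part of the package):

package main

import (
	"fmt"
	"strings"
)

// sysctlAllowed accepts a sysctl key only if it is in the allow-list or
// carries an allowed prefix, both of which depend on the namespaces in use.
func sysctlAllowed(key string, hasNetNS, hasIPCNS bool) bool {
	prefixes := []string{}
	exact := map[string]bool{}
	if hasNetNS {
		prefixes = append(prefixes, "net.")
	}
	if hasIPCNS {
		prefixes = append(prefixes, "fs.mqueue.")
		exact = map[string]bool{
			"kernel.msgmax": true, "kernel.msgmnb": true, "kernel.msgmni": true,
			"kernel.sem": true, "kernel.shmall": true, "kernel.shmmax": true,
			"kernel.shmmni": true, "kernel.shm_rmid_forced": true,
		}
	}
	if exact[key] {
		return true
	}
	for _, p := range prefixes {
		if strings.HasPrefix(key, p) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(sysctlAllowed("net.ipv4.ip_forward", true, false)) // true
	fmt.Println(sysctlAllowed("kernel.shmmax", false, true))       // true
	fmt.Println(sysctlAllowed("vm.swappiness", true, true))        // false
}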
|
||||
|
|
|
@@ -27,9 +27,6 @@ const (
|
|||
// The container exists, but all its processes are paused.
|
||||
Paused
|
||||
|
||||
// The container exists, but its state is saved on disk
|
||||
Checkpointed
|
||||
|
||||
// The container does not exist.
|
||||
Destroyed
|
||||
)
|
||||
|
@@ -44,8 +41,6 @@ func (s Status) String() string {
|
|||
return "pausing"
|
||||
case Paused:
|
||||
return "paused"
|
||||
case Checkpointed:
|
||||
return "checkpointed"
|
||||
case Destroyed:
|
||||
return "destroyed"
|
||||
default:
|
||||
|
|
|
@@ -23,6 +23,7 @@ import (
|
|||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/runc/libcontainer/criurpc"
|
||||
"github.com/opencontainers/runc/libcontainer/utils"
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
"github.com/vishvananda/netlink/nl"
|
||||
)
|
||||
|
||||
|
@@ -268,37 +269,40 @@ func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.
|
|||
}
|
||||
|
||||
func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*initProcess, error) {
|
||||
t := "_LIBCONTAINER_INITTYPE=" + string(initStandard)
|
||||
cloneFlags := c.config.Namespaces.CloneFlags()
|
||||
if cloneFlags&syscall.CLONE_NEWUSER != 0 {
|
||||
if err := c.addUidGidMappings(cmd.SysProcAttr); err != nil {
|
||||
// user mappings are not supported
|
||||
return nil, err
|
||||
}
|
||||
enableSetgroups(cmd.SysProcAttr)
|
||||
// Default to root user when user namespaces are enabled.
|
||||
if cmd.SysProcAttr.Credential == nil {
|
||||
cmd.SysProcAttr.Credential = &syscall.Credential{}
|
||||
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initStandard))
|
||||
nsMaps := make(map[configs.NamespaceType]string)
|
||||
for _, ns := range c.config.Namespaces {
|
||||
if ns.Path != "" {
|
||||
nsMaps[ns.Type] = ns.Path
|
||||
}
|
||||
}
|
||||
cmd.Env = append(cmd.Env, t)
|
||||
cmd.SysProcAttr.Cloneflags = cloneFlags
|
||||
_, sharePidns := nsMaps[configs.NEWPID]
|
||||
data, err := c.bootstrapData(c.config.Namespaces.CloneFlags(), nsMaps, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &initProcess{
|
||||
cmd: cmd,
|
||||
childPipe: childPipe,
|
||||
parentPipe: parentPipe,
|
||||
manager: c.cgroupManager,
|
||||
config: c.newInitConfig(p),
|
||||
container: c,
|
||||
process: p,
|
||||
cmd: cmd,
|
||||
childPipe: childPipe,
|
||||
parentPipe: parentPipe,
|
||||
manager: c.cgroupManager,
|
||||
config: c.newInitConfig(p),
|
||||
container: c,
|
||||
process: p,
|
||||
bootstrapData: data,
|
||||
sharePidns: sharePidns,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*setnsProcess, error) {
|
||||
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns))
|
||||
state, err := c.currentState()
|
||||
if err != nil {
|
||||
return nil, newSystemError(err)
|
||||
}
|
||||
// for setns process, we don't have to set cloneflags as the process namespaces
|
||||
// will only be set via setns syscall
|
||||
data, err := c.bootstrapData(0, c.initProcess.pid(), p.consolePath)
|
||||
data, err := c.bootstrapData(0, state.NamespacePaths, p.consolePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -315,7 +319,7 @@ func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe,
|
|||
}
|
||||
|
||||
func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
|
||||
return &initConfig{
|
||||
cfg := &initConfig{
|
||||
Config: c.config,
|
||||
Args: process.Args,
|
||||
Env: process.Env,
|
||||
|
@@ -324,7 +328,21 @@ func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
|
|||
Console: process.consolePath,
|
||||
Capabilities: process.Capabilities,
|
||||
PassedFilesCount: len(process.ExtraFiles),
|
||||
ContainerId: c.ID(),
|
||||
NoNewPrivileges: c.config.NoNewPrivileges,
|
||||
AppArmorProfile: c.config.AppArmorProfile,
|
||||
ProcessLabel: c.config.ProcessLabel,
|
||||
}
|
||||
if process.NoNewPrivileges != nil {
|
||||
cfg.NoNewPrivileges = *process.NoNewPrivileges
|
||||
}
|
||||
if process.AppArmorProfile != "" {
|
||||
cfg.AppArmorProfile = process.AppArmorProfile
|
||||
}
|
||||
if process.Label != "" {
|
||||
cfg.ProcessLabel = process.Label
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
func newPipe() (parent *os.File, child *os.File, err error) {
|
||||
|
@@ -1059,6 +1077,9 @@ func (c *linuxContainer) currentState() (*State, error) {
|
|||
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
|
||||
}
|
||||
for _, nsType := range configs.NamespaceTypes() {
|
||||
if !configs.IsNamespaceSupported(nsType) {
|
||||
continue
|
||||
}
|
||||
if _, ok := state.NamespacePaths[nsType]; !ok {
|
||||
ns := configs.Namespace{Type: nsType}
|
||||
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
|
||||
|
@@ -1068,18 +1089,69 @@ func (c *linuxContainer) currentState() (*State, error) {
|
|||
return state, nil
|
||||
}
|
||||
|
||||
// bootstrapData encodes the necessary data in netlink binary format as an io.Reader.
|
||||
// Consumer can write the data to a bootstrap program such as one that uses
|
||||
// nsenter package to bootstrap the container's init process correctly, i.e. with
|
||||
// correct namespaces, uid/gid mapping etc.
|
||||
func (c *linuxContainer) bootstrapData(cloneFlags uintptr, pid int, consolePath string) (io.Reader, error) {
|
||||
// orderNamespacePaths sorts namespace paths into a list of paths that we
|
||||
// can setns in order.
|
||||
func (c *linuxContainer) orderNamespacePaths(namespaces map[configs.NamespaceType]string) ([]string, error) {
|
||||
paths := []string{}
|
||||
nsTypes := []configs.NamespaceType{
|
||||
configs.NEWIPC,
|
||||
configs.NEWUTS,
|
||||
configs.NEWNET,
|
||||
configs.NEWPID,
|
||||
configs.NEWNS,
|
||||
}
|
||||
// join userns if the init process explicitly requires NEWUSER
|
||||
if c.config.Namespaces.Contains(configs.NEWUSER) {
|
||||
nsTypes = append(nsTypes, configs.NEWUSER)
|
||||
}
|
||||
for _, nsType := range nsTypes {
|
||||
if p, ok := namespaces[nsType]; ok && p != "" {
|
||||
// check if the requested namespace is supported
|
||||
if !configs.IsNamespaceSupported(nsType) {
|
||||
return nil, newSystemError(fmt.Errorf("namespace %s is not supported", nsType))
|
||||
}
|
||||
// only set to join this namespace if it exists
|
||||
if _, err := os.Lstat(p); err != nil {
|
||||
return nil, newSystemError(err)
|
||||
}
|
||||
// do not allow namespace path with comma as we use it to separate
|
||||
// the namespace paths
|
||||
if strings.ContainsRune(p, ',') {
|
||||
return nil, newSystemError(fmt.Errorf("invalid path %s", p))
|
||||
}
|
||||
paths = append(paths, p)
|
||||
}
|
||||
}
|
||||
return paths, nil
|
||||
}
|
||||
|
||||
func encodeIDMapping(idMap []configs.IDMap) ([]byte, error) {
|
||||
data := bytes.NewBuffer(nil)
|
||||
for _, im := range idMap {
|
||||
line := fmt.Sprintf("%d %d %d\n", im.ContainerID, im.HostID, im.Size)
|
||||
if _, err := data.WriteString(line); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return data.Bytes(), nil
|
||||
}
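Sketch of the wire format encodeIDMapping produces above: one "containerID hostID size" line per mapping, the same layout the kernel accepts in /proc/<pid>/uid_map and gid_map (encode and idMap below are illustrative names):

package main

import (
	"bytes"
	"fmt"
)

type idMap struct {
	ContainerID, HostID, Size int
}

// encode renders each mapping as a "containerID hostID size" line.
func encode(maps []idMap) []byte {
	var buf bytes.Buffer
	for _, m := range maps {
		fmt.Fprintf(&buf, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
	}
	return buf.Bytes()
}

func main() {
	fmt.Print(string(encode([]idMap{{0, 1000, 1}, {1, 100000, 65536}})))
	// Output:
	// 0 1000 1
	// 1 100000 65536
}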
|
||||
|
||||
// bootstrapData encodes the necessary data in netlink binary format
|
||||
// as an io.Reader.
|
||||
// Consumer can write the data to a bootstrap program
|
||||
// such as one that uses nsenter package to bootstrap the container's
|
||||
// init process correctly, i.e. with correct namespaces, uid/gid
|
||||
// mapping etc.
|
||||
func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.NamespaceType]string, consolePath string) (io.Reader, error) {
|
||||
// create the netlink message
|
||||
r := nl.NewNetlinkRequest(int(InitMsg), 0)
|
||||
// write pid
|
||||
|
||||
// write cloneFlags
|
||||
r.AddData(&Int32msg{
|
||||
Type: PidAttr,
|
||||
Value: uint32(pid),
|
||||
Type: CloneFlagsAttr,
|
||||
Value: uint32(cloneFlags),
|
||||
})
|
||||
|
||||
// write console path
|
||||
if consolePath != "" {
|
||||
r.AddData(&Bytemsg{
|
||||
|
@@ -1087,5 +1159,57 @@ func (c *linuxContainer) bootstrapData(cloneFlags uintptr, pid int, consolePath
|
|||
Value: []byte(consolePath),
|
||||
})
|
||||
}
|
||||
|
||||
// write custom namespace paths
|
||||
if len(nsMaps) > 0 {
|
||||
nsPaths, err := c.orderNamespacePaths(nsMaps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.AddData(&Bytemsg{
|
||||
Type: NsPathsAttr,
|
||||
Value: []byte(strings.Join(nsPaths, ",")),
|
||||
})
|
||||
}
|
||||
|
||||
// write uid/gid mappings only when we are not joining an existing user ns
|
||||
_, joinExistingUser := nsMaps[configs.NEWUSER]
|
||||
if !joinExistingUser {
|
||||
// write uid mappings
|
||||
if len(c.config.UidMappings) > 0 {
|
||||
b, err := encodeIDMapping(c.config.UidMappings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.AddData(&Bytemsg{
|
||||
Type: UidmapAttr,
|
||||
Value: b,
|
||||
})
|
||||
}
|
||||
|
||||
// write gid mappings
|
||||
if len(c.config.GidMappings) > 0 {
|
||||
b, err := encodeIDMapping(c.config.GidMappings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.AddData(&Bytemsg{
|
||||
Type: GidmapAttr,
|
||||
Value: b,
|
||||
})
|
||||
// check if we have CAP_SETGID to setgroup properly
|
||||
pid, err := capability.NewPid(os.Getpid())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !pid.Get(capability.EFFECTIVE, capability.CAP_SETGID) {
|
||||
r.AddData(&Boolmsg{
|
||||
Type: SetgroupAttr,
|
||||
Value: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return bytes.NewReader(r.Serialize()), nil
|
||||
}
|
||||
|
|
|
@@ -1,218 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
type mockCgroupManager struct {
|
||||
pids []int
|
||||
allPids []int
|
||||
stats *cgroups.Stats
|
||||
paths map[string]string
|
||||
}
|
||||
|
||||
func (m *mockCgroupManager) GetPids() ([]int, error) {
|
||||
return m.pids, nil
|
||||
}
|
||||
|
||||
func (m *mockCgroupManager) GetAllPids() ([]int, error) {
|
||||
return m.allPids, nil
|
||||
}
|
||||
|
||||
func (m *mockCgroupManager) GetStats() (*cgroups.Stats, error) {
|
||||
return m.stats, nil
|
||||
}
|
||||
|
||||
func (m *mockCgroupManager) Apply(pid int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockCgroupManager) Set(container *configs.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockCgroupManager) Destroy() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockCgroupManager) GetPaths() map[string]string {
|
||||
return m.paths
|
||||
}
|
||||
|
||||
func (m *mockCgroupManager) Freeze(state configs.FreezerState) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type mockProcess struct {
|
||||
_pid int
|
||||
started string
|
||||
}
|
||||
|
||||
func (m *mockProcess) terminate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockProcess) pid() int {
|
||||
return m._pid
|
||||
}
|
||||
|
||||
func (m *mockProcess) startTime() (string, error) {
|
||||
return m.started, nil
|
||||
}
|
||||
|
||||
func (m *mockProcess) start() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockProcess) wait() (*os.ProcessState, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockProcess) signal(_ os.Signal) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *mockProcess) externalDescriptors() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (p *mockProcess) setExternalDescriptors(newFds []string) {
|
||||
}
|
||||
|
||||
func TestGetContainerPids(t *testing.T) {
|
||||
container := &linuxContainer{
|
||||
id: "myid",
|
||||
config: &configs.Config{},
|
||||
cgroupManager: &mockCgroupManager{allPids: []int{1, 2, 3}},
|
||||
}
|
||||
pids, err := container.Processes()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, expected := range []int{1, 2, 3} {
|
||||
if pids[i] != expected {
|
||||
t.Fatalf("expected pid %d but received %d", expected, pids[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetContainerStats(t *testing.T) {
|
||||
container := &linuxContainer{
|
||||
id: "myid",
|
||||
config: &configs.Config{},
|
||||
cgroupManager: &mockCgroupManager{
|
||||
pids: []int{1, 2, 3},
|
||||
stats: &cgroups.Stats{
|
||||
MemoryStats: cgroups.MemoryStats{
|
||||
Usage: cgroups.MemoryData{
|
||||
Usage: 1024,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
stats, err := container.Stats()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if stats.CgroupStats == nil {
|
||||
t.Fatal("cgroup stats are nil")
|
||||
}
|
||||
if stats.CgroupStats.MemoryStats.Usage.Usage != 1024 {
|
||||
t.Fatalf("expected memory usage 1024 but recevied %d", stats.CgroupStats.MemoryStats.Usage.Usage)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetContainerState(t *testing.T) {
|
||||
var (
|
||||
pid = os.Getpid()
|
||||
expectedMemoryPath = "/sys/fs/cgroup/memory/myid"
|
||||
expectedNetworkPath = "/networks/fd"
|
||||
)
|
||||
container := &linuxContainer{
|
||||
id: "myid",
|
||||
config: &configs.Config{
|
||||
Namespaces: []configs.Namespace{
|
||||
{Type: configs.NEWPID},
|
||||
{Type: configs.NEWNS},
|
||||
{Type: configs.NEWNET, Path: expectedNetworkPath},
|
||||
{Type: configs.NEWUTS},
|
||||
// emulate host for IPC
|
||||
//{Type: configs.NEWIPC},
|
||||
},
|
||||
},
|
||||
initProcess: &mockProcess{
|
||||
_pid: pid,
|
||||
started: "010",
|
||||
},
|
||||
cgroupManager: &mockCgroupManager{
|
||||
pids: []int{1, 2, 3},
|
||||
stats: &cgroups.Stats{
|
||||
MemoryStats: cgroups.MemoryStats{
|
||||
Usage: cgroups.MemoryData{
|
||||
Usage: 1024,
|
||||
},
|
||||
},
|
||||
},
|
||||
paths: map[string]string{
|
||||
"memory": expectedMemoryPath,
|
||||
},
|
||||
},
|
||||
}
|
||||
container.state = &createdState{c: container}
|
||||
state, err := container.State()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if state.InitProcessPid != pid {
|
||||
t.Fatalf("expected pid %d but received %d", pid, state.InitProcessPid)
|
||||
}
|
||||
if state.InitProcessStartTime != "010" {
|
||||
t.Fatalf("expected process start time 010 but received %s", state.InitProcessStartTime)
|
||||
}
|
||||
paths := state.CgroupPaths
|
||||
if paths == nil {
|
||||
t.Fatal("cgroup paths should not be nil")
|
||||
}
|
||||
if memPath := paths["memory"]; memPath != expectedMemoryPath {
|
||||
t.Fatalf("expected memory path %q but received %q", expectedMemoryPath, memPath)
|
||||
}
|
||||
for _, ns := range container.config.Namespaces {
|
||||
path := state.NamespacePaths[ns.Type]
|
||||
if path == "" {
|
||||
t.Fatalf("expected non nil namespace path for %s", ns.Type)
|
||||
}
|
||||
if ns.Type == configs.NEWNET {
|
||||
if path != expectedNetworkPath {
|
||||
t.Fatalf("expected path %q but received %q", expectedNetworkPath, path)
|
||||
}
|
||||
} else {
|
||||
file := ""
|
||||
switch ns.Type {
|
||||
case configs.NEWNET:
|
||||
file = "net"
|
||||
case configs.NEWNS:
|
||||
file = "mnt"
|
||||
case configs.NEWPID:
|
||||
file = "pid"
|
||||
case configs.NEWIPC:
|
||||
file = "ipc"
|
||||
case configs.NEWUSER:
|
||||
file = "user"
|
||||
case configs.NEWUTS:
|
||||
file = "uts"
|
||||
}
|
||||
expected := fmt.Sprintf("/proc/%d/ns/%s", pid, file)
|
||||
if expected != path {
|
||||
t.Fatalf("expected path %q but received %q", expected, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,13 +0,0 @@
|
|||
// +build !go1.4
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// not available before go 1.4
|
||||
func (c *linuxContainer) addUidGidMappings(sys *syscall.SysProcAttr) error {
|
||||
return fmt.Errorf("User namespace is not supported in golang < 1.4")
|
||||
}
|
|
@@ -1,26 +0,0 @@
|
|||
// +build go1.4
|
||||
|
||||
package libcontainer
|
||||
|
||||
import "syscall"
|
||||
|
||||
// Converts IDMap to SysProcIDMap array and adds it to SysProcAttr.
|
||||
func (c *linuxContainer) addUidGidMappings(sys *syscall.SysProcAttr) error {
|
||||
if c.config.UidMappings != nil {
|
||||
sys.UidMappings = make([]syscall.SysProcIDMap, len(c.config.UidMappings))
|
||||
for i, um := range c.config.UidMappings {
|
||||
sys.UidMappings[i].ContainerID = um.ContainerID
|
||||
sys.UidMappings[i].HostID = um.HostID
|
||||
sys.UidMappings[i].Size = um.Size
|
||||
}
|
||||
}
|
||||
if c.config.GidMappings != nil {
|
||||
sys.GidMappings = make([]syscall.SysProcIDMap, len(c.config.GidMappings))
|
||||
for i, gm := range c.config.GidMappings {
|
||||
sys.GidMappings[i].ContainerID = gm.ContainerID
|
||||
sys.GidMappings[i].HostID = gm.HostID
|
||||
sys.GidMappings[i].Size = gm.Size
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@@ -1,63 +0,0 @@
|
|||
// +build linux freebsd
|
||||
|
||||
package devices
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDeviceFromPathLstatFailure(t *testing.T) {
|
||||
testError := errors.New("test error")
|
||||
|
||||
// Override os.Lstat to inject error.
|
||||
osLstat = func(path string) (os.FileInfo, error) {
|
||||
return nil, testError
|
||||
}
|
||||
|
||||
_, err := DeviceFromPath("", "")
|
||||
if err != testError {
|
||||
t.Fatalf("Unexpected error %v, expected %v", err, testError)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostDevicesIoutilReadDirFailure(t *testing.T) {
|
||||
testError := errors.New("test error")
|
||||
|
||||
// Override ioutil.ReadDir to inject error.
|
||||
ioutilReadDir = func(dirname string) ([]os.FileInfo, error) {
|
||||
return nil, testError
|
||||
}
|
||||
|
||||
_, err := HostDevices()
|
||||
if err != testError {
|
||||
t.Fatalf("Unexpected error %v, expected %v", err, testError)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostDevicesIoutilReadDirDeepFailure(t *testing.T) {
|
||||
testError := errors.New("test error")
|
||||
called := false
|
||||
|
||||
// Override ioutil.ReadDir to inject error after the first call.
|
||||
ioutilReadDir = func(dirname string) ([]os.FileInfo, error) {
|
||||
if called {
|
||||
return nil, testError
|
||||
}
|
||||
called = true
|
||||
|
||||
// Provoke a second call.
|
||||
fi, err := os.Lstat("/tmp")
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error %v", err)
|
||||
}
|
||||
|
||||
return []os.FileInfo{fi}, nil
|
||||
}
|
||||
|
||||
_, err := HostDevices()
|
||||
if err != testError {
|
||||
t.Fatalf("Unexpected error %v, expected %v", err, testError)
|
||||
}
|
||||
}
|
|
@@ -1,102 +0,0 @@
|
|||
// +build linux freebsd
|
||||
|
||||
package devices
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNotADevice = errors.New("not a device node")
|
||||
)
|
||||
|
||||
// Testing dependencies
|
||||
var (
|
||||
osLstat = os.Lstat
|
||||
ioutilReadDir = ioutil.ReadDir
|
||||
)
|
||||
|
||||
// Given the path to a device and its cgroup_permissions (which cannot be easily queried), look up the information about a linux device and return that information as a Device struct.
|
||||
func DeviceFromPath(path, permissions string) (*configs.Device, error) {
|
||||
fileInfo, err := osLstat(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
devType rune
|
||||
mode = fileInfo.Mode()
|
||||
fileModePermissionBits = os.FileMode.Perm(mode)
|
||||
)
|
||||
switch {
|
||||
case mode&os.ModeDevice == 0:
|
||||
return nil, ErrNotADevice
|
||||
case mode&os.ModeCharDevice != 0:
|
||||
fileModePermissionBits |= syscall.S_IFCHR
|
||||
devType = 'c'
|
||||
default:
|
||||
fileModePermissionBits |= syscall.S_IFBLK
|
||||
devType = 'b'
|
||||
}
|
||||
stat_t, ok := fileInfo.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot determine the device number for device %s", path)
|
||||
}
|
||||
devNumber := int(stat_t.Rdev)
|
||||
return &configs.Device{
|
||||
Type: devType,
|
||||
Path: path,
|
||||
Major: Major(devNumber),
|
||||
Minor: Minor(devNumber),
|
||||
Permissions: permissions,
|
||||
FileMode: fileModePermissionBits,
|
||||
Uid: stat_t.Uid,
|
||||
Gid: stat_t.Gid,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func HostDevices() ([]*configs.Device, error) {
|
||||
return getDevices("/dev")
|
||||
}
|
||||
|
||||
func getDevices(path string) ([]*configs.Device, error) {
|
||||
files, err := ioutilReadDir(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := []*configs.Device{}
|
||||
for _, f := range files {
|
||||
switch {
|
||||
case f.IsDir():
|
||||
switch f.Name() {
|
||||
case "pts", "shm", "fd", "mqueue":
|
||||
continue
|
||||
default:
|
||||
sub, err := getDevices(filepath.Join(path, f.Name()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out = append(out, sub...)
|
||||
continue
|
||||
}
|
||||
case f.Name() == "console":
|
||||
continue
|
||||
}
|
||||
device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm")
|
||||
if err != nil {
|
||||
if err == ErrNotADevice {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
out = append(out, device)
|
||||
}
|
||||
return out, nil
|
||||
}
|
|
@@ -1,3 +0,0 @@
|
|||
// +build windows
|
||||
|
||||
package devices
|
|
@@ -1,24 +0,0 @@
|
|||
// +build linux freebsd
|
||||
|
||||
package devices
|
||||
|
||||
/*
|
||||
|
||||
This code provides support for manipulating linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved.
|
||||
|
||||
You can read what they are here:
|
||||
|
||||
- http://www.makelinux.net/ldd3/chp-3-sect-2
|
||||
- http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
|
||||
|
||||
Note! These are NOT the same as the MAJOR(dev_t device);, MINOR(dev_t device); and MKDEV(int major, int minor); functions as defined in <linux/kdev_t.h> as the representation of device numbers used by go is different than the one used internally to the kernel! - https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9
|
||||
|
||||
*/
|
||||
|
||||
func Major(devNumber int) int64 {
|
||||
return int64((devNumber >> 8) & 0xfff)
|
||||
}
|
||||
|
||||
func Minor(devNumber int) int64 {
|
||||
return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
|
||||
}
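A worked example of the bit layout handled above: /dev/sda1 is conventionally 8:1, i.e. a raw device number of (8 << 8) | 1 = 2049, which Major and Minor split back apart:

package main

import "fmt"

// major and minor mirror the bit manipulation shown in the removed file.
func major(devNumber int) int64 { return int64((devNumber >> 8) & 0xfff) }
func minor(devNumber int) int64 { return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00)) }

func main() {
	dev := (8 << 8) | 1        // 2049, the conventional number for /dev/sda1
	fmt.Println(major(dev), minor(dev)) // 8 1
}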
|
|
@@ -1,20 +0,0 @@
|
|||
package libcontainer
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestErrorCode(t *testing.T) {
|
||||
codes := map[ErrorCode]string{
|
||||
IdInUse: "Id already in use",
|
||||
InvalidIdFormat: "Invalid format",
|
||||
ContainerPaused: "Container paused",
|
||||
ConfigInvalid: "Invalid configuration",
|
||||
SystemError: "System error",
|
||||
ContainerNotExists: "Container does not exist",
|
||||
}
|
||||
|
||||
for code, expected := range codes {
|
||||
if actual := code.String(); actual != expected {
|
||||
t.Fatalf("expected string %q but received %q", expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -227,36 +227,40 @@ func (l *LinuxFactory) StartInitialization() (err error) {
|
|||
pipe = os.NewFile(uintptr(pipefd), "pipe")
|
||||
it = initType(os.Getenv("_LIBCONTAINER_INITTYPE"))
|
||||
)
|
||||
defer pipe.Close()
|
||||
// clear the current process's environment to clean any libcontainer
|
||||
// specific env vars.
|
||||
os.Clearenv()
|
||||
var i initer
|
||||
defer func() {
|
||||
// if we have an error during the initialization of the container's init then send it back to the
|
||||
// parent process in the form of an initError.
|
||||
if err != nil {
|
||||
if _, ok := i.(*linuxStandardInit); ok {
|
||||
// Synchronisation only necessary for standard init.
|
||||
if err := utils.WriteJSON(pipe, syncT{procError}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
if err := utils.WriteJSON(pipe, newSystemError(err)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
if err := utils.WriteJSON(pipe, syncT{procStart}); err != nil {
|
||||
i, err := newContainerInit(it, pipe)
|
||||
if err != nil {
|
||||
l.sendError(nil, pipe, err)
|
||||
return err
|
||||
}
|
||||
if err := i.Init(); err != nil {
|
||||
if !isExecError(err) {
|
||||
l.sendError(i, pipe, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *LinuxFactory) sendError(i initer, pipe *os.File, err error) {
|
||||
// We have an error during the initialization of the container's init,
|
||||
// send it back to the parent process in the form of an initError.
|
||||
// If the container's init succeeded, syscall.Exec will not return, hence
|
||||
// this defer function will never be called.
|
||||
if i != nil {
|
||||
if _, ok := i.(*linuxStandardInit); ok {
|
||||
// Synchronisation only necessary for standard init.
|
||||
if err := utils.WriteJSON(pipe, syncT{procError}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
// ensure that this pipe is always closed
|
||||
pipe.Close()
|
||||
}()
|
||||
i, err = newContainerInit(it, pipe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return i.Init()
|
||||
if err := utils.WriteJSON(pipe, newSystemError(err)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LinuxFactory) loadState(root string) (*State, error) {
|
||||
|
@@ -284,3 +288,8 @@ func (l *LinuxFactory) validateID(id string) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isExecError(err error) bool {
|
||||
_, ok := err.(*exec.Error)
|
||||
return ok
|
||||
}
|
||||
|
|
|
@@ -1,183 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/runc/libcontainer/utils"
|
||||
)
|
||||
|
||||
func newTestRoot() (string, error) {
|
||||
dir, err := ioutil.TempDir("", "libcontainer")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
func TestFactoryNew(t *testing.T) {
|
||||
root, rerr := newTestRoot()
|
||||
if rerr != nil {
|
||||
t.Fatal(rerr)
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
factory, err := New(root, Cgroupfs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if factory == nil {
|
||||
t.Fatal("factory should not be nil")
|
||||
}
|
||||
lfactory, ok := factory.(*LinuxFactory)
|
||||
if !ok {
|
||||
t.Fatal("expected linux factory returned on linux based systems")
|
||||
}
|
||||
if lfactory.Root != root {
|
||||
t.Fatalf("expected factory root to be %q but received %q", root, lfactory.Root)
|
||||
}
|
||||
|
||||
if factory.Type() != "libcontainer" {
|
||||
t.Fatalf("unexpected factory type: %q, expected %q", factory.Type(), "libcontainer")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFactoryNewTmpfs(t *testing.T) {
|
||||
root, rerr := newTestRoot()
|
||||
if rerr != nil {
|
||||
t.Fatal(rerr)
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
factory, err := New(root, Cgroupfs, TmpfsRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if factory == nil {
|
||||
t.Fatal("factory should not be nil")
|
||||
}
|
||||
lfactory, ok := factory.(*LinuxFactory)
|
||||
if !ok {
|
||||
t.Fatal("expected linux factory returned on linux based systems")
|
||||
}
|
||||
if lfactory.Root != root {
|
||||
t.Fatalf("expected factory root to be %q but received %q", root, lfactory.Root)
|
||||
}
|
||||
|
||||
if factory.Type() != "libcontainer" {
|
||||
t.Fatalf("unexpected factory type: %q, expected %q", factory.Type(), "libcontainer")
|
||||
}
|
||||
mounted, err := mount.Mounted(lfactory.Root)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !mounted {
|
||||
t.Fatalf("Factory Root is not mounted")
|
||||
}
|
||||
mounts, err := mount.GetMounts()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var found bool
|
||||
for _, m := range mounts {
|
||||
if m.Mountpoint == lfactory.Root {
|
||||
if m.Fstype != "tmpfs" {
|
||||
t.Fatalf("Fstype of root: %s, expected %s", m.Fstype, "tmpfs")
|
||||
}
|
||||
if m.Source != "tmpfs" {
|
||||
t.Fatalf("Source of root: %s, expected %s", m.Source, "tmpfs")
|
||||
}
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("Factory Root is not listed in mounts list")
|
||||
}
|
||||
defer syscall.Unmount(root, syscall.MNT_DETACH)
|
||||
}
|
||||
|
||||
func TestFactoryLoadNotExists(t *testing.T) {
|
||||
root, rerr := newTestRoot()
|
||||
if rerr != nil {
|
||||
t.Fatal(rerr)
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
factory, err := New(root, Cgroupfs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = factory.Load("nocontainer")
|
||||
if err == nil {
|
||||
t.Fatal("expected nil error loading non-existing container")
|
||||
}
|
||||
lerr, ok := err.(Error)
|
||||
if !ok {
|
||||
t.Fatal("expected libcontainer error type")
|
||||
}
|
||||
if lerr.Code() != ContainerNotExists {
|
||||
t.Fatalf("expected error code %s but received %s", ContainerNotExists, lerr.Code())
|
||||
}
|
||||
}
|
||||
|
||||
func TestFactoryLoadContainer(t *testing.T) {
|
||||
root, err := newTestRoot()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
// setup default container config and state for mocking
|
||||
var (
|
||||
id = "1"
|
||||
expectedConfig = &configs.Config{
|
||||
Rootfs: "/mycontainer/root",
|
||||
}
|
||||
expectedState = &State{
|
||||
BaseState: BaseState{
|
||||
InitProcessPid: 1024,
|
||||
Config: *expectedConfig,
|
||||
},
|
||||
}
|
||||
)
|
||||
if err := os.Mkdir(filepath.Join(root, id), 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := marshal(filepath.Join(root, id, stateFilename), expectedState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
factory, err := New(root, Cgroupfs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
container, err := factory.Load(id)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if container.ID() != id {
|
||||
t.Fatalf("expected container id %q but received %q", id, container.ID())
|
||||
}
|
||||
config := container.Config()
|
||||
if config.Rootfs != expectedConfig.Rootfs {
|
||||
t.Fatalf("expected rootfs %q but received %q", expectedConfig.Rootfs, config.Rootfs)
|
||||
}
|
||||
lcontainer, ok := container.(*linuxContainer)
|
||||
if !ok {
|
||||
t.Fatal("expected linux container on linux based systems")
|
||||
}
|
||||
if lcontainer.initProcess.pid() != expectedState.InitProcessPid {
|
||||
t.Fatalf("expected init pid %d but received %d", expectedState.InitProcessPid, lcontainer.initProcess.pid())
|
||||
}
|
||||
}
|
||||
|
||||
func marshal(path string, v interface{}) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
return utils.WriteJSON(f, v)
|
||||
}
|
|
@@ -14,8 +14,9 @@ type syncType uint8
|
|||
const (
|
||||
procReady syncType = iota
|
||||
procError
|
||||
procStart
|
||||
procRun
|
||||
procHooks
|
||||
procResume
|
||||
)
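Sketch of the handshake style these sync types support: small JSON messages exchanged over a pipe between parent and child. The constants below mirror the updated set; the pipe usage is illustrative only, not the library's own wiring:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type syncType uint8

const (
	procReady syncType = iota
	procError
	procRun
	procHooks
	procResume
)

type syncT struct {
	Type syncType `json:"type"`
}

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	go func() {
		// "child" asks the "parent" to run pre-start hooks
		json.NewEncoder(w).Encode(syncT{procHooks})
		w.Close()
	}()
	var msg syncT
	if err := json.NewDecoder(r).Decode(&msg); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("got sync message type:", msg.Type) // 3 (procHooks)
}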
|
||||
|
||||
type syncT struct {
|
||||
|
|
|
@@ -1,14 +0,0 @@
|
|||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestErrorDetail(t *testing.T) {
|
||||
err := newGenericError(fmt.Errorf("test error"), SystemError)
|
||||
if derr := err.Detail(ioutil.Discard); derr != nil {
|
||||
t.Fatal(derr)
|
||||
}
|
||||
}
|
|
@@ -48,11 +48,15 @@ type initConfig struct {
|
|||
Env []string `json:"env"`
|
||||
Cwd string `json:"cwd"`
|
||||
Capabilities []string `json:"capabilities"`
|
||||
ProcessLabel string `json:"process_label"`
|
||||
AppArmorProfile string `json:"apparmor_profile"`
|
||||
NoNewPrivileges bool `json:"no_new_privileges"`
|
||||
User string `json:"user"`
|
||||
Config *configs.Config `json:"config"`
|
||||
Console string `json:"console"`
|
||||
Networks []*network `json:"network"`
|
||||
PassedFilesCount int `json:"passed_files_count"`
|
||||
ContainerId string `json:"containerid"`
|
||||
}
|
||||
|
||||
type initer interface {
|
||||
|
@@ -163,20 +167,22 @@ func syncParentReady(pipe io.ReadWriter) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// joinExistingNamespaces gets all the namespace paths specified for the container and
|
||||
// does a setns on the namespace fd so that the current process joins the namespace.
|
||||
func joinExistingNamespaces(namespaces []configs.Namespace) error {
|
||||
for _, ns := range namespaces {
|
||||
if ns.Path != "" {
|
||||
f, err := os.OpenFile(ns.Path, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = system.Setns(f.Fd(), uintptr(ns.Syscall()))
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// syncParentHooks sends to the given pipe a JSON payload which indicates that
|
||||
// the parent should execute pre-start hooks. It then waits for the parent to
|
||||
// indicate that it is cleared to resume.
|
||||
func syncParentHooks(pipe io.ReadWriter) error {
|
||||
// Tell parent.
|
||||
if err := utils.WriteJSON(pipe, syncT{procHooks}); err != nil {
|
||||
return err
|
||||
}
|
||||
// Wait for parent to give the all-clear.
|
||||
var procSync syncT
|
||||
if err := json.NewDecoder(pipe).Decode(&procSync); err != nil {
|
||||
if err == io.EOF {
|
||||
return fmt.Errorf("parent closed synchronisation channel")
|
||||
}
|
||||
if procSync.Type != procResume {
|
||||
return fmt.Errorf("invalid synchronisation flag from parent")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@@ -319,9 +325,10 @@ func setupRlimits(config *configs.Config) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func setOomScoreAdj(oomScoreAdj int) error {
|
||||
path := "/proc/self/oom_score_adj"
|
||||
return ioutil.WriteFile(path, []byte(strconv.Itoa(oomScoreAdj)), 0700)
|
||||
func setOomScoreAdj(oomScoreAdj int, pid int) error {
|
||||
path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
|
||||
|
||||
return ioutil.WriteFile(path, []byte(strconv.Itoa(oomScoreAdj)), 0600)
|
||||
}
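Usage sketch for the revised helper: with the added pid parameter it can adjust another process's score rather than only the caller's own. PID 1 below is purely illustrative and normally requires privileges; valid scores range from -1000 to 1000:

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// setOomScoreAdj mirrors the shape of the revised helper: write the score to
// the target process's oom_score_adj rather than the caller's own.
func setOomScoreAdj(score, pid int) error {
	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
	return ioutil.WriteFile(path, []byte(strconv.Itoa(score)), 0600)
}

func main() {
	pid := 1 // illustrative target; adjusting another process normally needs privileges
	if err := setOomScoreAdj(500, pid); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	b, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/oom_score_adj", pid))
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("oom_score_adj is now", strings.TrimSpace(string(b)))
}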
|
||||
|
||||
// killCgroupProcesses freezes then iterates over all the processes inside the
|
||||
|
|
|
@@ -1,204 +0,0 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
func showFile(t *testing.T, fname string) error {
|
||||
t.Logf("=== %s ===\n", fname)
|
||||
|
||||
f, err := os.Open(fname)
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
t.Log(scanner.Text())
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.Logf("=== END ===\n")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestCheckpoint(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
root, err := newTestRoot()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
|
||||
rootfs, err := newRootfs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer remove(rootfs)
|
||||
|
||||
config := newTemplateConfig(rootfs)
|
||||
|
||||
config.Mounts = append(config.Mounts, &configs.Mount{
|
||||
Destination: "/sys/fs/cgroup",
|
||||
Device: "cgroup",
|
||||
Flags: defaultMountFlags | syscall.MS_RDONLY,
|
||||
})
|
||||
|
||||
factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
container, err := factory.Create("test", config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer container.Destroy()
|
||||
|
||||
stdinR, stdinW, err := os.Pipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var stdout bytes.Buffer
|
||||
|
||||
pconfig := libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"cat"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: stdinR,
|
||||
Stdout: &stdout,
|
||||
}
|
||||
|
||||
err = container.Start(&pconfig)
|
||||
stdinR.Close()
|
||||
defer stdinW.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pid, err := pconfig.Pid()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
process, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
imagesDir, err := ioutil.TempDir("", "criu")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(imagesDir)
|
||||
|
||||
checkpointOpts := &libcontainer.CriuOpts{
|
||||
ImagesDirectory: imagesDir,
|
||||
WorkDirectory: imagesDir,
|
||||
}
|
||||
dumpLog := filepath.Join(checkpointOpts.WorkDirectory, "dump.log")
|
||||
restoreLog := filepath.Join(checkpointOpts.WorkDirectory, "restore.log")
|
||||
|
||||
if err := container.Checkpoint(checkpointOpts); err != nil {
|
||||
showFile(t, dumpLog)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
state, err := container.Status()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if state != libcontainer.Running {
|
||||
t.Fatal("Unexpected state checkpoint: ", state)
|
||||
}
|
||||
|
||||
stdinW.Close()
|
||||
_, err = process.Wait()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// reload the container
|
||||
container, err = factory.Load("test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
restoreStdinR, restoreStdinW, err := os.Pipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
restoreProcessConfig := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Stdin: restoreStdinR,
|
||||
Stdout: &stdout,
|
||||
}
|
||||
|
||||
err = container.Restore(restoreProcessConfig, checkpointOpts)
|
||||
restoreStdinR.Close()
|
||||
defer restoreStdinW.Close()
|
||||
if err != nil {
|
||||
showFile(t, restoreLog)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
state, err = container.Status()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if state != libcontainer.Running {
|
||||
t.Fatal("Unexpected restore state: ", state)
|
||||
}
|
||||
|
||||
pid, err = restoreProcessConfig.Pid()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
process, err = os.FindProcess(pid)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = restoreStdinW.WriteString("Hello!")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
restoreStdinW.Close()
|
||||
s, err := process.Wait()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !s.Success() {
|
||||
t.Fatal(s.String(), pid)
|
||||
}
|
||||
|
||||
output := string(stdout.Bytes())
|
||||
if !strings.Contains(output, "Hello!") {
|
||||
t.Fatal("Did not restore the pipe correctly:", output)
|
||||
}
|
||||
}
|
|
@ -1,2 +0,0 @@
|
|||
// integration is used for integration testing of libcontainer
|
||||
package integration
|
File diff suppressed because it is too large
Load diff
|
@ -1,402 +0,0 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
)
|
||||
|
||||
func TestExecIn(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
rootfs, err := newRootfs()
|
||||
ok(t, err)
|
||||
defer remove(rootfs)
|
||||
config := newTemplateConfig(rootfs)
|
||||
container, err := newContainer(config)
|
||||
ok(t, err)
|
||||
defer container.Destroy()
|
||||
|
||||
// Execute a first process in the container
|
||||
stdinR, stdinW, err := os.Pipe()
|
||||
ok(t, err)
|
||||
process := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"cat"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: stdinR,
|
||||
}
|
||||
err = container.Start(process)
|
||||
stdinR.Close()
|
||||
defer stdinW.Close()
|
||||
ok(t, err)
|
||||
|
||||
buffers := newStdBuffers()
|
||||
ps := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"ps"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: buffers.Stdin,
|
||||
Stdout: buffers.Stdout,
|
||||
Stderr: buffers.Stderr,
|
||||
}
|
||||
|
||||
err = container.Start(ps)
|
||||
ok(t, err)
|
||||
waitProcess(ps, t)
|
||||
stdinW.Close()
|
||||
waitProcess(process, t)
|
||||
|
||||
out := buffers.Stdout.String()
|
||||
if !strings.Contains(out, "cat") || !strings.Contains(out, "ps") {
|
||||
t.Fatalf("unexpected running process, output %q", out)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecInRlimit(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
rootfs, err := newRootfs()
|
||||
ok(t, err)
|
||||
defer remove(rootfs)
|
||||
config := newTemplateConfig(rootfs)
|
||||
container, err := newContainer(config)
|
||||
ok(t, err)
|
||||
defer container.Destroy()
|
||||
|
||||
stdinR, stdinW, err := os.Pipe()
|
||||
ok(t, err)
|
||||
process := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"cat"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: stdinR,
|
||||
}
|
||||
err = container.Start(process)
|
||||
stdinR.Close()
|
||||
defer stdinW.Close()
|
||||
ok(t, err)
|
||||
|
||||
buffers := newStdBuffers()
|
||||
ps := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"/bin/sh", "-c", "ulimit -n"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: buffers.Stdin,
|
||||
Stdout: buffers.Stdout,
|
||||
Stderr: buffers.Stderr,
|
||||
}
|
||||
err = container.Start(ps)
|
||||
ok(t, err)
|
||||
waitProcess(ps, t)
|
||||
|
||||
stdinW.Close()
|
||||
waitProcess(process, t)
|
||||
|
||||
out := buffers.Stdout.String()
|
||||
if limit := strings.TrimSpace(out); limit != "1025" {
|
||||
t.Fatalf("expected rlimit to be 1025, got %s", limit)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecInError(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
rootfs, err := newRootfs()
|
||||
ok(t, err)
|
||||
defer remove(rootfs)
|
||||
config := newTemplateConfig(rootfs)
|
||||
container, err := newContainer(config)
|
||||
ok(t, err)
|
||||
defer container.Destroy()
|
||||
|
||||
// Execute a first process in the container
|
||||
stdinR, stdinW, err := os.Pipe()
|
||||
ok(t, err)
|
||||
process := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"cat"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: stdinR,
|
||||
}
|
||||
err = container.Start(process)
|
||||
stdinR.Close()
|
||||
defer func() {
|
||||
stdinW.Close()
|
||||
if _, err := process.Wait(); err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
}()
|
||||
ok(t, err)
|
||||
|
||||
for i := 0; i < 42; i++ {
|
||||
var out bytes.Buffer
|
||||
unexistent := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"unexistent"},
|
||||
Env: standardEnvironment,
|
||||
Stdout: &out,
|
||||
}
|
||||
err = container.Start(unexistent)
|
||||
if err == nil {
|
||||
t.Fatal("Should be an error")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "executable file not found") {
|
||||
t.Fatalf("Should be error about not found executable, got %s", err)
|
||||
}
|
||||
if !bytes.Contains(out.Bytes(), []byte("executable file not found")) {
|
||||
t.Fatalf("executable file not found error not delivered to stdio:\n%s", out.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecInTTY(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
rootfs, err := newRootfs()
|
||||
ok(t, err)
|
||||
defer remove(rootfs)
|
||||
config := newTemplateConfig(rootfs)
|
||||
container, err := newContainer(config)
|
||||
ok(t, err)
|
||||
defer container.Destroy()
|
||||
|
||||
// Execute a first process in the container
|
||||
stdinR, stdinW, err := os.Pipe()
|
||||
ok(t, err)
|
||||
process := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"cat"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: stdinR,
|
||||
}
|
||||
err = container.Start(process)
|
||||
stdinR.Close()
|
||||
defer stdinW.Close()
|
||||
ok(t, err)
|
||||
|
||||
var stdout bytes.Buffer
|
||||
ps := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"ps"},
|
||||
Env: standardEnvironment,
|
||||
}
|
||||
console, err := ps.NewConsole(0)
|
||||
copy := make(chan struct{})
|
||||
go func() {
|
||||
io.Copy(&stdout, console)
|
||||
close(copy)
|
||||
}()
|
||||
ok(t, err)
|
||||
err = container.Start(ps)
|
||||
ok(t, err)
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatal("Waiting for copy timed out")
|
||||
case <-copy:
|
||||
}
|
||||
waitProcess(ps, t)
|
||||
|
||||
stdinW.Close()
|
||||
waitProcess(process, t)
|
||||
|
||||
out := stdout.String()
|
||||
if !strings.Contains(out, "cat") || !strings.Contains(out, "ps") {
|
||||
t.Fatalf("unexpected running process, output %q", out)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecInEnvironment(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
rootfs, err := newRootfs()
|
||||
ok(t, err)
|
||||
defer remove(rootfs)
|
||||
config := newTemplateConfig(rootfs)
|
||||
container, err := newContainer(config)
|
||||
ok(t, err)
|
||||
defer container.Destroy()
|
||||
|
||||
// Execute a first process in the container
|
||||
stdinR, stdinW, err := os.Pipe()
|
||||
ok(t, err)
|
||||
process := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"cat"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: stdinR,
|
||||
}
|
||||
err = container.Start(process)
|
||||
stdinR.Close()
|
||||
defer stdinW.Close()
|
||||
ok(t, err)
|
||||
|
||||
buffers := newStdBuffers()
|
||||
process2 := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"env"},
|
||||
Env: []string{
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
"DEBUG=true",
|
||||
"DEBUG=false",
|
||||
"ENV=test",
|
||||
},
|
||||
Stdin: buffers.Stdin,
|
||||
Stdout: buffers.Stdout,
|
||||
Stderr: buffers.Stderr,
|
||||
}
|
||||
err = container.Start(process2)
|
||||
ok(t, err)
|
||||
waitProcess(process2, t)
|
||||
|
||||
stdinW.Close()
|
||||
waitProcess(process, t)
|
||||
|
||||
out := buffers.Stdout.String()
|
||||
// check execin's process environment
|
||||
if !strings.Contains(out, "DEBUG=false") ||
|
||||
!strings.Contains(out, "ENV=test") ||
|
||||
!strings.Contains(out, "HOME=/root") ||
|
||||
!strings.Contains(out, "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin") ||
|
||||
strings.Contains(out, "DEBUG=true") {
|
||||
t.Fatalf("unexpected running process, output %q", out)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecinPassExtraFiles(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
rootfs, err := newRootfs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer remove(rootfs)
|
||||
config := newTemplateConfig(rootfs)
|
||||
container, err := newContainer(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer container.Destroy()
|
||||
|
||||
// Execute a first process in the container
|
||||
stdinR, stdinW, err := os.Pipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
process := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"cat"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: stdinR,
|
||||
}
|
||||
err = container.Start(process)
|
||||
stdinR.Close()
|
||||
defer stdinW.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var stdout bytes.Buffer
|
||||
pipeout1, pipein1, err := os.Pipe()
|
||||
pipeout2, pipein2, err := os.Pipe()
|
||||
inprocess := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"sh", "-c", "cd /proc/$$/fd; echo -n *; echo -n 1 >3; echo -n 2 >4"},
|
||||
Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
|
||||
ExtraFiles: []*os.File{pipein1, pipein2},
|
||||
Stdin: nil,
|
||||
Stdout: &stdout,
|
||||
}
|
||||
err = container.Start(inprocess)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
waitProcess(inprocess, t)
|
||||
stdinW.Close()
|
||||
waitProcess(process, t)
|
||||
|
||||
out := string(stdout.Bytes())
|
||||
// fd 5 is the directory handle for /proc/$$/fd
|
||||
if out != "0 1 2 3 4 5" {
|
||||
t.Fatalf("expected to have the file descriptors '0 1 2 3 4 5' passed to exec, got '%s'", out)
|
||||
}
|
||||
var buf = []byte{0}
|
||||
_, err = pipeout1.Read(buf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
out1 := string(buf)
|
||||
if out1 != "1" {
|
||||
t.Fatalf("expected first pipe to receive '1', got '%s'", out1)
|
||||
}
|
||||
|
||||
_, err = pipeout2.Read(buf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
out2 := string(buf)
|
||||
if out2 != "2" {
|
||||
t.Fatalf("expected second pipe to receive '2', got '%s'", out2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecInOomScoreAdj(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
rootfs, err := newRootfs()
|
||||
ok(t, err)
|
||||
defer remove(rootfs)
|
||||
config := newTemplateConfig(rootfs)
|
||||
config.OomScoreAdj = 200
|
||||
container, err := newContainer(config)
|
||||
ok(t, err)
|
||||
defer container.Destroy()
|
||||
|
||||
stdinR, stdinW, err := os.Pipe()
|
||||
ok(t, err)
|
||||
process := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"cat"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: stdinR,
|
||||
}
|
||||
err = container.Start(process)
|
||||
stdinR.Close()
|
||||
defer stdinW.Close()
|
||||
ok(t, err)
|
||||
|
||||
buffers := newStdBuffers()
|
||||
ps := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"/bin/sh", "-c", "cat /proc/self/oom_score_adj"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: buffers.Stdin,
|
||||
Stdout: buffers.Stdout,
|
||||
Stderr: buffers.Stderr,
|
||||
}
|
||||
err = container.Start(ps)
|
||||
ok(t, err)
|
||||
waitProcess(ps, t)
|
||||
|
||||
stdinW.Close()
|
||||
waitProcess(process, t)
|
||||
|
||||
out := buffers.Stdout.String()
|
||||
if oomScoreAdj := strings.TrimSpace(out); oomScoreAdj != strconv.Itoa(config.OomScoreAdj) {
|
||||
t.Fatalf("expected oomScoreAdj to be %d, got %s", config.OomScoreAdj, oomScoreAdj)
|
||||
}
|
||||
}
|
|
@ -1,60 +0,0 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
|
||||
_ "github.com/opencontainers/runc/libcontainer/nsenter"
|
||||
)
|
||||
|
||||
// init runs the libcontainer initialization code because of the busybox style needs
|
||||
// to work around the go runtime and the issues with forking
|
||||
func init() {
|
||||
if len(os.Args) < 2 || os.Args[1] != "init" {
|
||||
return
|
||||
}
|
||||
runtime.GOMAXPROCS(1)
|
||||
runtime.LockOSThread()
|
||||
factory, err := libcontainer.New("")
|
||||
if err != nil {
|
||||
logrus.Fatalf("unable to initialize for container: %s", err)
|
||||
}
|
||||
if err := factory.StartInitialization(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
factory libcontainer.Factory
|
||||
systemdFactory libcontainer.Factory
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
var (
|
||||
err error
|
||||
ret int = 0
|
||||
)
|
||||
|
||||
logrus.SetOutput(os.Stderr)
|
||||
logrus.SetLevel(logrus.InfoLevel)
|
||||
|
||||
factory, err = libcontainer.New(".", libcontainer.Cgroupfs)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if systemd.UseSystemd() {
|
||||
systemdFactory, err = libcontainer.New(".", libcontainer.SystemdCgroups)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
ret = m.Run()
|
||||
os.Exit(ret)
|
||||
}
|
|
@ -1,219 +0,0 @@
|
|||
// +build linux,cgo,seccomp
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
libseccomp "github.com/seccomp/libseccomp-golang"
|
||||
)
|
||||
|
||||
func TestSeccompDenyGetcwd(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
|
||||
rootfs, err := newRootfs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer remove(rootfs)
|
||||
|
||||
config := newTemplateConfig(rootfs)
|
||||
config.Seccomp = &configs.Seccomp{
|
||||
DefaultAction: configs.Allow,
|
||||
Syscalls: []*configs.Syscall{
|
||||
{
|
||||
Name: "getcwd",
|
||||
Action: configs.Errno,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
container, err := newContainer(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer container.Destroy()
|
||||
|
||||
buffers := newStdBuffers()
|
||||
pwd := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"pwd"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: buffers.Stdin,
|
||||
Stdout: buffers.Stdout,
|
||||
Stderr: buffers.Stderr,
|
||||
}
|
||||
|
||||
err = container.Start(pwd)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ps, err := pwd.Wait()
|
||||
if err == nil {
|
||||
t.Fatal("Expecting error (negative return code); instead exited cleanly!")
|
||||
}
|
||||
|
||||
var exitCode int
|
||||
status := ps.Sys().(syscall.WaitStatus)
|
||||
if status.Exited() {
|
||||
exitCode = status.ExitStatus()
|
||||
} else if status.Signaled() {
|
||||
exitCode = -int(status.Signal())
|
||||
} else {
|
||||
t.Fatalf("Unrecognized exit reason!")
|
||||
}
|
||||
|
||||
if exitCode == 0 {
|
||||
t.Fatalf("Getcwd should fail with negative exit code, instead got %d!", exitCode)
|
||||
}
|
||||
|
||||
expected := "pwd: getcwd: Operation not permitted"
|
||||
actual := strings.Trim(buffers.Stderr.String(), "\n")
|
||||
if actual != expected {
|
||||
t.Fatalf("Expected output %s but got %s\n", expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeccompPermitWriteConditional(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
|
||||
rootfs, err := newRootfs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer remove(rootfs)
|
||||
|
||||
config := newTemplateConfig(rootfs)
|
||||
config.Seccomp = &configs.Seccomp{
|
||||
DefaultAction: configs.Allow,
|
||||
Syscalls: []*configs.Syscall{
|
||||
{
|
||||
Name: "write",
|
||||
Action: configs.Errno,
|
||||
Args: []*configs.Arg{
|
||||
{
|
||||
Index: 0,
|
||||
Value: 1,
|
||||
Op: configs.GreaterThan,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
container, err := newContainer(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer container.Destroy()
|
||||
|
||||
buffers := newStdBuffers()
|
||||
dmesg := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"busybox", "ls", "/"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: buffers.Stdin,
|
||||
Stdout: buffers.Stdout,
|
||||
Stderr: buffers.Stderr,
|
||||
}
|
||||
|
||||
err = container.Start(dmesg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := dmesg.Wait(); err != nil {
|
||||
t.Fatalf("%s: %s", err, buffers.Stderr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeccompDenyWriteConditional(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
|
||||
// Only test if library version is v2.2.1 or higher
|
||||
// Conditional filtering will always error in v2.2.0 and lower
|
||||
major, minor, micro := libseccomp.GetLibraryVersion()
|
||||
if (major == 2 && minor < 2) || (major == 2 && minor == 2 && micro < 1) {
|
||||
return
|
||||
}
|
||||
|
||||
rootfs, err := newRootfs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer remove(rootfs)
|
||||
|
||||
config := newTemplateConfig(rootfs)
|
||||
config.Seccomp = &configs.Seccomp{
|
||||
DefaultAction: configs.Allow,
|
||||
Syscalls: []*configs.Syscall{
|
||||
{
|
||||
Name: "write",
|
||||
Action: configs.Errno,
|
||||
Args: []*configs.Arg{
|
||||
{
|
||||
Index: 0,
|
||||
Value: 1,
|
||||
Op: configs.GreaterThan,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
container, err := newContainer(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer container.Destroy()
|
||||
|
||||
buffers := newStdBuffers()
|
||||
dmesg := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: []string{"busybox", "ls", "does_not_exist"},
|
||||
Env: standardEnvironment,
|
||||
Stdin: buffers.Stdin,
|
||||
Stdout: buffers.Stdout,
|
||||
Stderr: buffers.Stderr,
|
||||
}
|
||||
|
||||
err = container.Start(dmesg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ps, err := dmesg.Wait()
|
||||
if err == nil {
|
||||
t.Fatal("Expecting negative return, instead got 0!")
|
||||
}
|
||||
|
||||
var exitCode int
|
||||
status := ps.Sys().(syscall.WaitStatus)
|
||||
if status.Exited() {
|
||||
exitCode = status.ExitStatus()
|
||||
} else if status.Signaled() {
|
||||
exitCode = -int(status.Signal())
|
||||
} else {
|
||||
t.Fatalf("Unrecognized exit reason!")
|
||||
}
|
||||
|
||||
if exitCode == 0 {
|
||||
t.Fatalf("Busybox should fail with negative exit code, instead got %d!", exitCode)
|
||||
}
|
||||
|
||||
// We're denying write to stderr, so we expect an empty buffer
|
||||
expected := ""
|
||||
actual := strings.Trim(buffers.Stderr.String(), "\n")
|
||||
if actual != expected {
|
||||
t.Fatalf("Expected output %s but got %s\n", expected, actual)
|
||||
}
|
||||
}
|
|
@ -1,120 +0,0 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
var standardEnvironment = []string{
|
||||
"HOME=/root",
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
"HOSTNAME=integration",
|
||||
"TERM=xterm",
|
||||
}
|
||||
|
||||
const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
|
||||
|
||||
// newTemplateConfig returns a base template for running a container
|
||||
//
|
||||
// it uses a network strategy of just setting a loopback interface
|
||||
// and the default setup for devices
|
||||
func newTemplateConfig(rootfs string) *configs.Config {
|
||||
return &configs.Config{
|
||||
Rootfs: rootfs,
|
||||
Capabilities: []string{
|
||||
"CAP_CHOWN",
|
||||
"CAP_DAC_OVERRIDE",
|
||||
"CAP_FSETID",
|
||||
"CAP_FOWNER",
|
||||
"CAP_MKNOD",
|
||||
"CAP_NET_RAW",
|
||||
"CAP_SETGID",
|
||||
"CAP_SETUID",
|
||||
"CAP_SETFCAP",
|
||||
"CAP_SETPCAP",
|
||||
"CAP_NET_BIND_SERVICE",
|
||||
"CAP_SYS_CHROOT",
|
||||
"CAP_KILL",
|
||||
"CAP_AUDIT_WRITE",
|
||||
},
|
||||
Namespaces: configs.Namespaces([]configs.Namespace{
|
||||
{Type: configs.NEWNS},
|
||||
{Type: configs.NEWUTS},
|
||||
{Type: configs.NEWIPC},
|
||||
{Type: configs.NEWPID},
|
||||
{Type: configs.NEWNET},
|
||||
}),
|
||||
Cgroups: &configs.Cgroup{
|
||||
Path: "integration/test",
|
||||
Resources: &configs.Resources{
|
||||
MemorySwappiness: -1,
|
||||
AllowAllDevices: false,
|
||||
AllowedDevices: configs.DefaultAllowedDevices,
|
||||
},
|
||||
},
|
||||
MaskPaths: []string{
|
||||
"/proc/kcore",
|
||||
},
|
||||
ReadonlyPaths: []string{
|
||||
"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
|
||||
},
|
||||
Devices: configs.DefaultAutoCreatedDevices,
|
||||
Hostname: "integration",
|
||||
Mounts: []*configs.Mount{
|
||||
{
|
||||
Source: "proc",
|
||||
Destination: "/proc",
|
||||
Device: "proc",
|
||||
Flags: defaultMountFlags,
|
||||
},
|
||||
{
|
||||
Source: "tmpfs",
|
||||
Destination: "/dev",
|
||||
Device: "tmpfs",
|
||||
Flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME,
|
||||
Data: "mode=755",
|
||||
},
|
||||
{
|
||||
Source: "devpts",
|
||||
Destination: "/dev/pts",
|
||||
Device: "devpts",
|
||||
Flags: syscall.MS_NOSUID | syscall.MS_NOEXEC,
|
||||
Data: "newinstance,ptmxmode=0666,mode=0620,gid=5",
|
||||
},
|
||||
{
|
||||
Device: "tmpfs",
|
||||
Source: "shm",
|
||||
Destination: "/dev/shm",
|
||||
Data: "mode=1777,size=65536k",
|
||||
Flags: defaultMountFlags,
|
||||
},
|
||||
{
|
||||
Source: "mqueue",
|
||||
Destination: "/dev/mqueue",
|
||||
Device: "mqueue",
|
||||
Flags: defaultMountFlags,
|
||||
},
|
||||
{
|
||||
Source: "sysfs",
|
||||
Destination: "/sys",
|
||||
Device: "sysfs",
|
||||
Flags: defaultMountFlags | syscall.MS_RDONLY,
|
||||
},
|
||||
},
|
||||
Networks: []*configs.Network{
|
||||
{
|
||||
Type: "loopback",
|
||||
Address: "127.0.0.1/0",
|
||||
Gateway: "localhost",
|
||||
},
|
||||
},
|
||||
Rlimits: []configs.Rlimit{
|
||||
{
|
||||
Type: syscall.RLIMIT_NOFILE,
|
||||
Hard: uint64(1025),
|
||||
Soft: uint64(1025),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
|
@ -1,141 +0,0 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
func newStdBuffers() *stdBuffers {
|
||||
return &stdBuffers{
|
||||
Stdin: bytes.NewBuffer(nil),
|
||||
Stdout: bytes.NewBuffer(nil),
|
||||
Stderr: bytes.NewBuffer(nil),
|
||||
}
|
||||
}
|
||||
|
||||
type stdBuffers struct {
|
||||
Stdin *bytes.Buffer
|
||||
Stdout *bytes.Buffer
|
||||
Stderr *bytes.Buffer
|
||||
}
|
||||
|
||||
func (b *stdBuffers) String() string {
|
||||
s := []string{}
|
||||
if b.Stderr != nil {
|
||||
s = append(s, b.Stderr.String())
|
||||
}
|
||||
if b.Stdout != nil {
|
||||
s = append(s, b.Stdout.String())
|
||||
}
|
||||
return strings.Join(s, "|")
|
||||
}
|
||||
|
||||
// ok fails the test if an err is not nil.
|
||||
func ok(t testing.TB, err error) {
|
||||
if err != nil {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
t.Fatalf("%s:%d: unexpected error: %s\n\n", filepath.Base(file), line, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func waitProcess(p *libcontainer.Process, t *testing.T) {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
status, err := p.Wait()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("%s:%d: unexpected error: %s\n\n", filepath.Base(file), line, err.Error())
|
||||
}
|
||||
|
||||
if !status.Success() {
|
||||
t.Fatalf("%s:%d: unexpected status: %s\n\n", filepath.Base(file), line, status.String())
|
||||
}
|
||||
}
|
||||
|
||||
// newRootfs creates a new tmp directory and copies the busybox root filesystem
|
||||
func newRootfs() (string, error) {
|
||||
dir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := copyBusybox(dir); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
func remove(dir string) {
|
||||
os.RemoveAll(dir)
|
||||
}
|
||||
|
||||
// copyBusybox copies the rootfs for a busybox container created for the test image
|
||||
// into the new directory for the specific test
|
||||
func copyBusybox(dest string) error {
|
||||
out, err := exec.Command("sh", "-c", fmt.Sprintf("cp -R /busybox/* %s/", dest)).CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("copy error %q: %q", err, out)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newContainer(config *configs.Config) (libcontainer.Container, error) {
|
||||
f := factory
|
||||
|
||||
if config.Cgroups != nil && config.Cgroups.Parent == "system.slice" {
|
||||
f = systemdFactory
|
||||
}
|
||||
|
||||
return f.Create("testCT", config)
|
||||
}
|
||||
|
||||
// runContainer runs the container with the specific config and arguments
|
||||
//
|
||||
// buffers are returned containing the STDOUT and STDERR output for the run
|
||||
// along with the exit code and any go error
|
||||
func runContainer(config *configs.Config, console string, args ...string) (buffers *stdBuffers, exitCode int, err error) {
|
||||
container, err := newContainer(config)
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
defer container.Destroy()
|
||||
buffers = newStdBuffers()
|
||||
process := &libcontainer.Process{
|
||||
Cwd: "/",
|
||||
Args: args,
|
||||
Env: standardEnvironment,
|
||||
Stdin: buffers.Stdin,
|
||||
Stdout: buffers.Stdout,
|
||||
Stderr: buffers.Stderr,
|
||||
}
|
||||
|
||||
err = container.Start(process)
|
||||
if err != nil {
|
||||
return buffers, -1, err
|
||||
}
|
||||
ps, err := process.Wait()
|
||||
if err != nil {
|
||||
return buffers, -1, err
|
||||
}
|
||||
status := ps.Sys().(syscall.WaitStatus)
|
||||
if status.Exited() {
|
||||
exitCode = status.ExitStatus()
|
||||
} else if status.Signaled() {
|
||||
exitCode = -int(status.Signal())
|
||||
} else {
|
||||
return buffers, -1, err
|
||||
}
|
||||
return
|
||||
}
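Taken together, these helpers keep individual integration tests short. A hypothetical test (not part of this change, but assuming it lives in the same `integration` package so it can reuse `newRootfs`, `newTemplateConfig`, `runContainer` and `ok`) that runs a single busybox command might look like:

```go
func TestEchoHello(t *testing.T) {
	if testing.Short() {
		return
	}
	rootfs, err := newRootfs()
	ok(t, err)
	defer remove(rootfs)

	config := newTemplateConfig(rootfs)
	buffers, exitCode, err := runContainer(config, "", "echo", "hello")
	ok(t, err)
	if exitCode != 0 {
		t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr)
	}
	if !strings.Contains(buffers.Stdout.String(), "hello") {
		t.Fatalf("unexpected stdout %q", buffers.Stdout.String())
	}
}
```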
|
|
@ -1,144 +0,0 @@
|
|||
// +build selinux,linux
|
||||
|
||||
package label
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/selinux"
|
||||
)
|
||||
|
||||
func TestInit(t *testing.T) {
|
||||
if selinux.SelinuxEnabled() {
|
||||
var testNull []string
|
||||
plabel, mlabel, err := InitLabels(testNull)
|
||||
if err != nil {
|
||||
t.Log("InitLabels Failed")
|
||||
t.Fatal(err)
|
||||
}
|
||||
testDisabled := []string{"disable"}
|
||||
plabel, mlabel, err = InitLabels(testDisabled)
|
||||
if err != nil {
|
||||
t.Log("InitLabels Disabled Failed")
|
||||
t.Fatal(err)
|
||||
}
|
||||
if plabel != "" {
|
||||
t.Log("InitLabels Disabled Failed")
|
||||
t.Fatal()
|
||||
}
|
||||
testUser := []string{"user:user_u", "role:user_r", "type:user_t", "level:s0:c1,c15"}
|
||||
plabel, mlabel, err = InitLabels(testUser)
|
||||
if err != nil {
|
||||
t.Log("InitLabels User Failed")
|
||||
t.Fatal(err)
|
||||
}
|
||||
if plabel != "user_u:user_r:user_t:s0:c1,c15" || mlabel != "user_u:object_r:svirt_sandbox_file_t:s0:c1,c15" {
|
||||
t.Log("InitLabels User Match Failed")
|
||||
t.Log(plabel, mlabel)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testBadData := []string{"user", "role:user_r", "type:user_t", "level:s0:c1,c15"}
|
||||
plabel, mlabel, err = InitLabels(testBadData)
|
||||
if err == nil {
|
||||
t.Log("InitLabels Bad Failed")
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
func TestDuplicateLabel(t *testing.T) {
|
||||
secopt := DupSecOpt("system_u:system_r:svirt_lxc_net_t:s0:c1,c2")
|
||||
t.Log(secopt)
|
||||
for _, opt := range secopt {
|
||||
con := strings.SplitN(opt, ":", 3)
|
||||
if len(con) != 3 || con[0] != "label" {
|
||||
t.Errorf("Invalid DupSecOpt return value")
|
||||
continue
|
||||
}
|
||||
if con[1] == "user" {
|
||||
if con[2] != "system_u" {
|
||||
t.Errorf("DupSecOpt Failed user incorrect")
|
||||
}
|
||||
continue
|
||||
}
|
||||
if con[1] == "role" {
|
||||
if con[2] != "system_r" {
|
||||
t.Errorf("DupSecOpt Failed role incorrect")
|
||||
}
|
||||
continue
|
||||
}
|
||||
if con[1] == "type" {
|
||||
if con[2] != "svirt_lxc_net_t" {
|
||||
t.Errorf("DupSecOpt Failed type incorrect")
|
||||
}
|
||||
continue
|
||||
}
|
||||
if con[1] == "level" {
|
||||
if con[2] != "s0:c1,c2" {
|
||||
t.Errorf("DupSecOpt Failed level incorrect")
|
||||
}
|
||||
continue
|
||||
}
|
||||
t.Errorf("DupSecOpt Failed invalid field %q", con[1])
|
||||
}
|
||||
secopt = DisableSecOpt()
|
||||
if secopt[0] != "label:disable" {
|
||||
t.Errorf("DisableSecOpt Failed level incorrect")
|
||||
}
|
||||
}
|
||||
func TestRelabel(t *testing.T) {
|
||||
testdir := "/tmp/test"
|
||||
if err := os.Mkdir(testdir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(testdir)
|
||||
label := "system_u:system_r:svirt_sandbox_file_t:s0:c1,c2"
|
||||
if err := Relabel(testdir, "", true); err != nil {
|
||||
t.Fatalf("Relabel with no label failed: %v", err)
|
||||
}
|
||||
if err := Relabel(testdir, label, true); err != nil {
|
||||
t.Fatalf("Relabel shared failed: %v", err)
|
||||
}
|
||||
if err := Relabel(testdir, label, false); err != nil {
|
||||
t.Fatalf("Relabel unshared failed: %v", err)
|
||||
}
|
||||
if err := Relabel("/etc", label, false); err == nil {
|
||||
t.Fatal("Relabel /etc succeeded")
|
||||
}
|
||||
if err := Relabel("/", label, false); err == nil {
|
||||
t.Fatal("Relabel / succeeded")
|
||||
}
|
||||
if err := Relabel("/usr", label, false); err == nil {
|
||||
t.Fatal("Relabel /usr succeeded")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidate(t *testing.T) {
|
||||
if err := Validate("zZ"); err != ErrIncompatibleLabel {
|
||||
t.Fatalf("Expected incompatible error, got %v", err)
|
||||
}
|
||||
if err := Validate("Z"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := Validate("z"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := Validate(""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsShared(t *testing.T) {
|
||||
if shared := IsShared("Z"); shared {
|
||||
t.Fatalf("Expected label `Z` to not be shared, got %v", shared)
|
||||
}
|
||||
if shared := IsShared("z"); !shared {
|
||||
t.Fatalf("Expected label `z` to be shared, got %v", shared)
|
||||
}
|
||||
if shared := IsShared("Zz"); !shared {
|
||||
t.Fatalf("Expected label `Zz` to be shared, got %v", shared)
|
||||
}
|
||||
|
||||
}
|
|
@ -12,8 +12,12 @@ import (
|
|||
// The number is randomly chosen to not conflict with known netlink types
|
||||
const (
|
||||
InitMsg uint16 = 62000
|
||||
PidAttr uint16 = 27281
|
||||
CloneFlagsAttr uint16 = 27281
|
||||
ConsolePathAttr uint16 = 27282
|
||||
NsPathsAttr uint16 = 27283
|
||||
UidmapAttr uint16 = 27284
|
||||
GidmapAttr uint16 = 27285
|
||||
SetgroupAttr uint16 = 27286
|
||||
// When syscall.NLA_HDRLEN is in gccgo, take this out.
|
||||
syscall_NLA_HDRLEN = (syscall.SizeofNlAttr + syscall.NLA_ALIGNTO - 1) & ^(syscall.NLA_ALIGNTO - 1)
|
||||
)
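The `syscall_NLA_HDRLEN` expression above is the standard round-up-to-alignment idiom: it pads `SizeofNlAttr` to the next multiple of `NLA_ALIGNTO`. A small standalone sketch of the same computation, assuming the usual Linux values where both constants are 4:

```go
package main

import "fmt"

// alignUp rounds x up to the next multiple of align (align must be a power of two).
// It is the same bit trick used for syscall_NLA_HDRLEN above.
func alignUp(x, align int) int {
	return (x + align - 1) &^ (align - 1)
}

func main() {
	fmt.Println(alignUp(4, 4)) // 4: the 4-byte attribute header needs no padding
	fmt.Println(alignUp(5, 4)) // 8: a 5-byte attribute is padded to the next 4-byte boundary
}
```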
|
||||
|
@ -60,3 +64,25 @@ func (msg *Bytemsg) Serialize() []byte {
|
|||
func (msg *Bytemsg) Len() int {
|
||||
return syscall_NLA_HDRLEN + len(msg.Value) + 1 // null-terminated
|
||||
}
|
||||
|
||||
type Boolmsg struct {
|
||||
Type uint16
|
||||
Value bool
|
||||
}
|
||||
|
||||
func (msg *Boolmsg) Serialize() []byte {
|
||||
buf := make([]byte, msg.Len())
|
||||
native := nl.NativeEndian()
|
||||
native.PutUint16(buf[0:2], uint16(msg.Len()))
|
||||
native.PutUint16(buf[2:4], msg.Type)
|
||||
if msg.Value {
|
||||
buf[4] = 1
|
||||
} else {
|
||||
buf[4] = 0
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
func (msg *Boolmsg) Len() int {
|
||||
return syscall_NLA_HDRLEN + 1
|
||||
}
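As a usage sketch of the new type, a `Boolmsg` always occupies five bytes on the wire: a uint16 length, a uint16 attribute type, and a single 0/1 value byte. The attribute type below is hypothetical and the printed bytes assume a little-endian machine:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer"
)

func main() {
	msg := &libcontainer.Boolmsg{
		Type:  27299, // hypothetical attribute type, not one of the constants above
		Value: true,
	}
	fmt.Println(msg.Len())               // 5
	fmt.Printf("% x\n", msg.Serialize()) // 05 00 a3 6a 01 on a little-endian machine
}
```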
|
||||
|
|
|
@ -1,128 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type notifyFunc func(paths map[string]string) (<-chan struct{}, error)
|
||||
|
||||
func testMemoryNotification(t *testing.T, evName string, notify notifyFunc, targ string) {
|
||||
memoryPath, err := ioutil.TempDir("", "testmemnotification-"+evName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
evFile := filepath.Join(memoryPath, evName)
|
||||
eventPath := filepath.Join(memoryPath, "cgroup.event_control")
|
||||
if err := ioutil.WriteFile(evFile, []byte{}, 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(eventPath, []byte{}, 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
paths := map[string]string{
|
||||
"memory": memoryPath,
|
||||
}
|
||||
ch, err := notify(paths)
|
||||
if err != nil {
|
||||
t.Fatal("expected no error, got:", err)
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadFile(eventPath)
|
||||
if err != nil {
|
||||
t.Fatal("couldn't read event control file:", err)
|
||||
}
|
||||
|
||||
var eventFd, evFd int
|
||||
var arg string
|
||||
if targ != "" {
|
||||
_, err = fmt.Sscanf(string(data), "%d %d %s", &eventFd, &evFd, &arg)
|
||||
} else {
|
||||
_, err = fmt.Sscanf(string(data), "%d %d", &eventFd, &evFd)
|
||||
}
|
||||
if err != nil || arg != targ {
|
||||
t.Fatalf("invalid control data %q: %s", data, err)
|
||||
}
|
||||
|
||||
// re-open the eventfd
|
||||
efd, err := syscall.Dup(eventFd)
|
||||
if err != nil {
|
||||
t.Fatal("unable to reopen eventfd:", err)
|
||||
}
|
||||
defer syscall.Close(efd)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal("unable to dup event fd:", err)
|
||||
}
|
||||
|
||||
buf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(buf, 1)
|
||||
|
||||
if _, err := syscall.Write(efd, buf); err != nil {
|
||||
t.Fatal("unable to write to eventfd:", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ch:
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatal("no notification on channel after 100ms")
|
||||
}
|
||||
|
||||
// simulate what happens when a cgroup is destroyed by cleaning up and then
|
||||
// writing to the eventfd.
|
||||
if err := os.RemoveAll(memoryPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := syscall.Write(efd, buf); err != nil {
|
||||
t.Fatal("unable to write to eventfd:", err)
|
||||
}
|
||||
|
||||
// give things a moment to shut down
|
||||
select {
|
||||
case _, ok := <-ch:
|
||||
if ok {
|
||||
t.Fatal("expected no notification to be triggered")
|
||||
}
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
}
|
||||
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(evFd), syscall.F_GETFD, 0); err != syscall.EBADF {
|
||||
t.Error("expected event control to be closed")
|
||||
}
|
||||
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(eventFd), syscall.F_GETFD, 0); err != syscall.EBADF {
|
||||
t.Error("expected event fd to be closed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotifyOnOOM(t *testing.T) {
|
||||
f := func(paths map[string]string) (<-chan struct{}, error) {
|
||||
return notifyOnOOM(paths)
|
||||
}
|
||||
|
||||
testMemoryNotification(t, "memory.oom_control", f, "")
|
||||
}
|
||||
|
||||
func TestNotifyMemoryPressure(t *testing.T) {
|
||||
tests := map[PressureLevel]string{
|
||||
LowPressure: "low",
|
||||
MediumPressure: "medium",
|
||||
CriticalPressure: "critical",
|
||||
}
|
||||
|
||||
for level, arg := range tests {
|
||||
f := func(paths map[string]string) (<-chan struct{}, error) {
|
||||
return notifyMemoryPressure(paths, level)
|
||||
}
|
||||
|
||||
testMemoryNotification(t, "memory.pressure_level", f, arg)
|
||||
}
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
## nsenter
|
||||
|
||||
The `nsenter` package registers a special init constructor that is called before
the Go runtime has a chance to boot. This gives us the ability to `setns` into
existing namespaces and avoids the issues the Go runtime has with multiple
threads. The constructor runs whenever this package is imported into a Go
application.

The `nsenter` package uses `import "C"` and the [cgo](https://golang.org/cmd/cgo/)
package. In cgo, if the import of "C" is immediately preceded by a comment, that
comment, called the preamble, is used as a header when compiling the C parts of
the package. So every time package `nsenter` is imported, the C function
`nsexec()` is called. Package `nsenter` is currently only imported by the Docker
execdriver, so that C code runs before every call to `execdriver.Exec()`.

`nsexec()` first checks the environment variable `_LIBCONTAINER_INITPID`, which
identifies the container process whose namespaces should be joined. The
namespace file descriptors are found under `/proc/[pid]/ns` and entered with the
`setns` syscall.

It then reads the pipe number from `_LIBCONTAINER_INITPIPE`; error messages can
be transferred back through that pipe. If a tty is requested,
`_LIBCONTAINER_CONSOLE_PATH` is set and a console is opened for output.

Finally, `nsexec()` clones a child process, exits the parent process, and lets
the Go runtime take over.
|
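A minimal sketch of the caller side described above, assuming the same conventions as the tests in this tree (the argv name and the use of fd 3 are illustrative, not a prescribed API): the parent re-executes a binary that blank-imports `nsenter`, so `nsexec()` runs before the Go runtime starts.

```go
package main

import (
	"os"
	"os/exec"
	"syscall"

	// Blank import: registers the C constructor so nsexec() runs before main.
	_ "github.com/opencontainers/runc/libcontainer/nsenter"
)

func main() {
	// Bidirectional init pipe, as in newPipe() from nsenter_test.go below.
	fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	parent := os.NewFile(uintptr(fds[1]), "parent")
	child := os.NewFile(uintptr(fds[0]), "child")
	defer parent.Close()

	cmd := &exec.Cmd{
		Path:       "/proc/self/exe",
		Args:       []string{"nsenter-init"}, // hypothetical argv[0]
		ExtraFiles: []*os.File{child},        // becomes fd 3 in the child
		Env: []string{
			"_LIBCONTAINER_INITTYPE=setns",
			"_LIBCONTAINER_INITPIPE=3",
		},
	}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// The bootstrap payload (target pid, console path, ...) is then written to
	// parent as a netlink message, as nsenter_test.go below demonstrates.
	cmd.Wait()
}
```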
|
@ -1,12 +0,0 @@
|
|||
// +build linux,!gccgo
|
||||
|
||||
package nsenter
|
||||
|
||||
/*
|
||||
#cgo CFLAGS: -Wall
|
||||
extern void nsexec();
|
||||
void __attribute__((constructor)) init(void) {
|
||||
nsexec();
|
||||
}
|
||||
*/
|
||||
import "C"
|
|
@ -1,25 +0,0 @@
|
|||
// +build linux,gccgo
|
||||
|
||||
package nsenter
|
||||
|
||||
/*
|
||||
#cgo CFLAGS: -Wall
|
||||
extern void nsexec();
|
||||
void __attribute__((constructor)) init(void) {
|
||||
nsexec();
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// AlwaysFalse is here to stay false
|
||||
// (and be exported so the compiler doesn't optimize out its reference)
|
||||
var AlwaysFalse bool
|
||||
|
||||
func init() {
|
||||
if AlwaysFalse {
|
||||
// by referencing this C init() in a noop test, it will ensure the compiler
|
||||
// links in the C function.
|
||||
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
|
||||
C.init()
|
||||
}
|
||||
}
|
|
@ -1,143 +0,0 @@
|
|||
package nsenter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
"github.com/vishvananda/netlink/nl"
|
||||
)
|
||||
|
||||
type pid struct {
|
||||
Pid int `json:"Pid"`
|
||||
}
|
||||
|
||||
func TestNsenterAlivePid(t *testing.T) {
|
||||
args := []string{"nsenter-exec"}
|
||||
parent, child, err := newPipe()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create pipe %v", err)
|
||||
}
|
||||
|
||||
cmd := &exec.Cmd{
|
||||
Path: os.Args[0],
|
||||
Args: args,
|
||||
ExtraFiles: []*os.File{child},
|
||||
Env: []string{"_LIBCONTAINER_INITTYPE=setns", "_LIBCONTAINER_INITPIPE=3"},
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
t.Fatalf("nsenter failed to start %v", err)
|
||||
}
|
||||
r := nl.NewNetlinkRequest(int(libcontainer.InitMsg), 0)
|
||||
r.AddData(&libcontainer.Int32msg{
|
||||
Type: libcontainer.PidAttr,
|
||||
Value: uint32(os.Getpid()),
|
||||
})
|
||||
if _, err := io.Copy(parent, bytes.NewReader(r.Serialize())); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
decoder := json.NewDecoder(parent)
|
||||
var pid *pid
|
||||
|
||||
if err := decoder.Decode(&pid); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
if err := cmd.Wait(); err != nil {
|
||||
t.Fatalf("nsenter exits with a non-zero exit status")
|
||||
}
|
||||
p, err := os.FindProcess(pid.Pid)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
p.Wait()
|
||||
}
|
||||
|
||||
func TestNsenterInvalidPid(t *testing.T) {
|
||||
args := []string{"nsenter-exec"}
|
||||
parent, child, err := newPipe()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create pipe %v", err)
|
||||
}
|
||||
|
||||
cmd := &exec.Cmd{
|
||||
Path: os.Args[0],
|
||||
Args: args,
|
||||
ExtraFiles: []*os.File{child},
|
||||
Env: []string{"_LIBCONTAINER_INITTYPE=setns", "_LIBCONTAINER_INITPIPE=3"},
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
t.Fatal("nsenter exits with a zero exit status")
|
||||
}
|
||||
r := nl.NewNetlinkRequest(int(libcontainer.InitMsg), 0)
|
||||
r.AddData(&libcontainer.Int32msg{
|
||||
Type: libcontainer.PidAttr,
|
||||
Value: 0,
|
||||
})
|
||||
if _, err := io.Copy(parent, bytes.NewReader(r.Serialize())); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := cmd.Wait(); err == nil {
|
||||
t.Fatal("nsenter exits with a zero exit status")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNsenterDeadPid(t *testing.T) {
|
||||
deadCmd := exec.Command("true")
|
||||
if err := deadCmd.Run(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
args := []string{"nsenter-exec"}
|
||||
parent, child, err := newPipe()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create pipe %v", err)
|
||||
}
|
||||
|
||||
cmd := &exec.Cmd{
|
||||
Path: os.Args[0],
|
||||
Args: args,
|
||||
ExtraFiles: []*os.File{child},
|
||||
Env: []string{"_LIBCONTAINER_INITTYPE=setns", "_LIBCONTAINER_INITPIPE=3"},
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
t.Fatal("nsenter exits with a zero exit status")
|
||||
}
|
||||
|
||||
r := nl.NewNetlinkRequest(int(libcontainer.InitMsg), 0)
|
||||
r.AddData(&libcontainer.Int32msg{
|
||||
Type: libcontainer.PidAttr,
|
||||
Value: uint32(deadCmd.Process.Pid),
|
||||
})
|
||||
if _, err := io.Copy(parent, bytes.NewReader(r.Serialize())); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := cmd.Wait(); err == nil {
|
||||
t.Fatal("nsenter exits with a zero exit status")
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
if strings.HasPrefix(os.Args[0], "nsenter-") {
|
||||
os.Exit(0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func newPipe() (parent *os.File, child *os.File, err error) {
|
||||
fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil
|
||||
}
|
|
@ -1,5 +0,0 @@
|
|||
// +build !linux !cgo
|
||||
|
||||
package nsenter
|
||||
|
||||
import "C"
|
|
@ -1,261 +0,0 @@
|
|||
#define _GNU_SOURCE
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
|
||||
#include <linux/limits.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/wait.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <fcntl.h>
|
||||
#include <signal.h>
|
||||
#include <setjmp.h>
|
||||
#include <sched.h>
|
||||
#include <signal.h>
|
||||
|
||||
#include <bits/sockaddr.h>
|
||||
#include <linux/netlink.h>
|
||||
#include <linux/types.h>
|
||||
#include <stdint.h>
|
||||
#include <sys/socket.h>
|
||||
|
||||
/* All arguments should be above stack, because it grows down */
|
||||
struct clone_arg {
|
||||
/*
|
||||
* Reserve some space for clone() to locate arguments
|
||||
* and retcode in this place
|
||||
*/
|
||||
char stack[4096] __attribute__ ((aligned(16)));
|
||||
char stack_ptr[0];
|
||||
jmp_buf *env;
|
||||
};
|
||||
|
||||
#define pr_perror(fmt, ...) fprintf(stderr, "nsenter: " fmt ": %m\n", ##__VA_ARGS__)
|
||||
|
||||
static int child_func(void *_arg)
|
||||
{
|
||||
struct clone_arg *arg = (struct clone_arg *)_arg;
|
||||
longjmp(*arg->env, 1);
|
||||
}
|
||||
|
||||
// Use raw setns syscall for versions of glibc that don't include it (namely glibc-2.12)
|
||||
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14
|
||||
#define _GNU_SOURCE
|
||||
#include "syscall.h"
|
||||
#if defined(__NR_setns) && !defined(SYS_setns)
|
||||
#define SYS_setns __NR_setns
|
||||
#endif
|
||||
#ifdef SYS_setns
|
||||
int setns(int fd, int nstype)
|
||||
{
|
||||
return syscall(SYS_setns, fd, nstype);
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
static int clone_parent(jmp_buf * env) __attribute__ ((noinline));
|
||||
static int clone_parent(jmp_buf * env)
|
||||
{
|
||||
struct clone_arg ca;
|
||||
int child;
|
||||
|
||||
ca.env = env;
|
||||
child = clone(child_func, ca.stack_ptr, CLONE_PARENT | SIGCHLD, &ca);
|
||||
|
||||
return child;
|
||||
}
|
||||
|
||||
static uint32_t readint32(char *buf)
|
||||
{
|
||||
return *(uint32_t *) buf;
|
||||
}
|
||||
|
||||
// list of known message types we want to send to the bootstrap program
|
||||
// These are defined in libcontainer/message_linux.go
|
||||
#define INIT_MSG 62000
|
||||
#define PID_ATTR 27281
|
||||
#define CONSOLE_PATH_ATTR 27282
|
||||
|
||||
void nsexec()
|
||||
{
|
||||
char *namespaces[] = { "ipc", "uts", "net", "pid", "mnt", "user" };
|
||||
const int num = sizeof(namespaces) / sizeof(char *);
|
||||
jmp_buf env;
|
||||
char buf[PATH_MAX], *val;
|
||||
int i, tfd, self_tfd, child, n, len, pipenum, consolefd = -1;
|
||||
pid_t pid = 0;
|
||||
|
||||
// if we don't have INITTYPE or this is the init process, skip the bootstrap process
|
||||
val = getenv("_LIBCONTAINER_INITTYPE");
|
||||
if (val == NULL || strcmp(val, "standard") == 0) {
|
||||
return;
|
||||
}
|
||||
if (strcmp(val, "setns") != 0) {
|
||||
pr_perror("Invalid inittype %s", val);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
val = getenv("_LIBCONTAINER_INITPIPE");
|
||||
if (val == NULL) {
|
||||
pr_perror("Child pipe not found");
|
||||
exit(1);
|
||||
}
|
||||
pipenum = atoi(val);
|
||||
snprintf(buf, sizeof(buf), "%d", pipenum);
|
||||
if (strcmp(val, buf)) {
|
||||
pr_perror("Unable to parse _LIBCONTAINER_INITPIPE");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
char nlbuf[NLMSG_HDRLEN];
|
||||
struct nlmsghdr *nh;
|
||||
if ((n = read(pipenum, nlbuf, NLMSG_HDRLEN)) != NLMSG_HDRLEN) {
|
||||
pr_perror("Failed to read netlink header, got %d", n);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
nh = (struct nlmsghdr *)nlbuf;
|
||||
if (nh->nlmsg_type == NLMSG_ERROR) {
|
||||
pr_perror("Invalid netlink header message");
|
||||
exit(1);
|
||||
}
|
||||
if (nh->nlmsg_type != INIT_MSG) {
|
||||
pr_perror("Unexpected netlink message type %d", nh->nlmsg_type);
|
||||
exit(1);
|
||||
}
|
||||
// read the netlink payload
|
||||
len = NLMSG_PAYLOAD(nh, 0);
|
||||
char data[len];
|
||||
if ((n = read(pipenum, data, len)) != len) {
|
||||
pr_perror("Failed to read netlink payload, got %d", n);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
int start = 0;
|
||||
struct nlattr *attr;
|
||||
while (start < len) {
|
||||
int payload_len;
|
||||
attr = (struct nlattr *)((void *)data + start);
|
||||
start += NLA_HDRLEN;
|
||||
payload_len = attr->nla_len - NLA_HDRLEN;
|
||||
switch (attr->nla_type) {
|
||||
case PID_ATTR:
|
||||
pid = (pid_t) readint32(data + start);
|
||||
break;
|
||||
case CONSOLE_PATH_ATTR:
|
||||
consolefd = open((char *)data + start, O_RDWR);
|
||||
if (consolefd < 0) {
|
||||
pr_perror("Failed to open console %s", (char *)data + start);
|
||||
exit(1);
|
||||
}
|
||||
break;
|
||||
}
|
||||
start += NLA_ALIGN(payload_len);
|
||||
}
|
||||
|
||||
// required pid to be passed
|
||||
if (pid == 0) {
|
||||
pr_perror("missing pid");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* Check that the specified process exists */
|
||||
snprintf(buf, PATH_MAX - 1, "/proc/%d/ns", pid);
|
||||
tfd = open(buf, O_DIRECTORY | O_RDONLY);
|
||||
if (tfd == -1) {
|
||||
pr_perror("Failed to open \"%s\"", buf);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
self_tfd = open("/proc/self/ns", O_DIRECTORY | O_RDONLY);
|
||||
if (self_tfd == -1) {
|
||||
pr_perror("Failed to open /proc/self/ns");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
struct stat st;
|
||||
struct stat self_st;
|
||||
int fd;
|
||||
|
||||
/* Symlinks on all namespaces exist for dead processes, but they can't be opened */
|
||||
if (fstatat(tfd, namespaces[i], &st, 0) == -1) {
|
||||
// Ignore nonexistent namespaces.
|
||||
if (errno == ENOENT)
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Skip namespaces we're already part of */
|
||||
if (fstatat(self_tfd, namespaces[i], &self_st, 0) != -1 && st.st_ino == self_st.st_ino) {
|
||||
continue;
|
||||
}
|
||||
|
||||
fd = openat(tfd, namespaces[i], O_RDONLY);
|
||||
if (fd == -1) {
|
||||
pr_perror("Failed to open ns file %s for ns %s", buf, namespaces[i]);
|
||||
exit(1);
|
||||
}
|
||||
// Set the namespace.
|
||||
if (setns(fd, 0) == -1) {
|
||||
pr_perror("Failed to setns for %s", namespaces[i]);
|
||||
exit(1);
|
||||
}
|
||||
close(fd);
|
||||
}
|
||||
|
||||
close(self_tfd);
|
||||
close(tfd);
|
||||
|
||||
if (setjmp(env) == 1) {
|
||||
// Child
|
||||
|
||||
if (setsid() == -1) {
|
||||
pr_perror("setsid failed");
|
||||
exit(1);
|
||||
}
|
||||
if (consolefd != -1) {
|
||||
if (ioctl(consolefd, TIOCSCTTY, 0) == -1) {
|
||||
pr_perror("ioctl TIOCSCTTY failed");
|
||||
exit(1);
|
||||
}
|
||||
if (dup3(consolefd, STDIN_FILENO, 0) != STDIN_FILENO) {
|
||||
pr_perror("Failed to dup 0");
|
||||
exit(1);
|
||||
}
|
||||
if (dup3(consolefd, STDOUT_FILENO, 0) != STDOUT_FILENO) {
|
||||
pr_perror("Failed to dup 1");
|
||||
exit(1);
|
||||
}
|
||||
if (dup3(consolefd, STDERR_FILENO, 0) != STDERR_FILENO) {
|
||||
pr_perror("Failed to dup 2");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
// Finish executing, let the Go runtime take over.
|
||||
return;
|
||||
}
|
||||
// Parent
|
||||
|
||||
// We must fork to actually enter the PID namespace, use CLONE_PARENT
|
||||
// so the child can have the right parent, and we don't need to forward
|
||||
// the child's exit code or resend its death signal.
|
||||
child = clone_parent(&env);
|
||||
if (child < 0) {
|
||||
pr_perror("Unable to fork");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
len = snprintf(buf, sizeof(buf), "{ \"pid\" : %d }\n", child);
|
||||
|
||||
if (write(pipenum, buf, len) != len) {
|
||||
pr_perror("Unable to send a child pid");
|
||||
kill(child, SIGKILL);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
|
@ -48,6 +48,16 @@ type Process struct {
|
|||
// All capabilities not specified will be dropped from the processes capability mask
|
||||
Capabilities []string
|
||||
|
||||
// AppArmorProfile specifies the profile to apply to the process and is
|
||||
// changed at the time the process is execed
|
||||
AppArmorProfile string
|
||||
|
||||
// Label specifies the label to apply to the process. It is commonly used by selinux
|
||||
Label string
|
||||
|
||||
// NoNewPrivileges controls whether processes can gain additional privileges.
|
||||
NoNewPrivileges *bool
|
||||
|
||||
ops processOperations
|
||||
}
|
||||
|
||||
|
|
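For reference, a hypothetical caller of the new fields might look like the fragment below; the AppArmor profile name and the `container` value are assumptions, not part of this change, and `NoNewPrivileges` is a pointer so that an unset value can be told apart from an explicit false:

```go
noNewPrivs := true
process := &libcontainer.Process{
	Cwd:             "/",
	Args:            []string{"ps"},
	Env:             []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
	AppArmorProfile: "docker-default", // assumed profile name
	Label:           "system_u:system_r:svirt_lxc_net_t:s0:c1,c2",
	NoNewPrivileges: &noNewPrivs,
	Stdin:           os.Stdin,
	Stdout:          os.Stdout,
	Stderr:          os.Stderr,
}
if err := container.Start(process); err != nil {
	// handle the error
}
```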
|
@ -88,6 +88,10 @@ func (p *setnsProcess) start() (err error) {
|
|||
if err := utils.WriteJSON(p.parentPipe, p.config); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
// set oom_score_adj
|
||||
if err := setOomScoreAdj(p.config.Config.OomScoreAdj, p.pid()); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
|
||||
if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
|
||||
return newSystemError(err)
|
||||
|
@ -167,14 +171,16 @@ func (p *setnsProcess) setExternalDescriptors(newFds []string) {
|
|||
}
|
||||
|
||||
type initProcess struct {
|
||||
cmd *exec.Cmd
|
||||
parentPipe *os.File
|
||||
childPipe *os.File
|
||||
config *initConfig
|
||||
manager cgroups.Manager
|
||||
container *linuxContainer
|
||||
fds []string
|
||||
process *Process
|
||||
cmd *exec.Cmd
|
||||
parentPipe *os.File
|
||||
childPipe *os.File
|
||||
config *initConfig
|
||||
manager cgroups.Manager
|
||||
container *linuxContainer
|
||||
fds []string
|
||||
process *Process
|
||||
bootstrapData io.Reader
|
||||
sharePidns bool
|
||||
}
|
||||
|
||||
func (p *initProcess) pid() int {
|
||||
|
@ -185,15 +191,49 @@ func (p *initProcess) externalDescriptors() []string {
|
|||
return p.fds
|
||||
}
|
||||
|
||||
func (p *initProcess) start() (err error) {
|
||||
// execSetns runs the process that executes the C code performing the setns calls.
// Because setns support requires the C process to fork off a child and perform the
// setns before the Go runtime boots, we wait for that process to exit and receive
// the child's pid over the provided pipe. It is called by initProcess.start.
|
||||
func (p *initProcess) execSetns() error {
|
||||
status, err := p.cmd.Process.Wait()
|
||||
if err != nil {
|
||||
p.cmd.Wait()
|
||||
return err
|
||||
}
|
||||
if !status.Success() {
|
||||
p.cmd.Wait()
|
||||
return &exec.ExitError{ProcessState: status}
|
||||
}
|
||||
var pid *pid
|
||||
if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil {
|
||||
p.cmd.Wait()
|
||||
return err
|
||||
}
|
||||
process, err := os.FindProcess(pid.Pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.cmd.Process = process
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *initProcess) start() error {
|
||||
defer p.parentPipe.Close()
|
||||
err = p.cmd.Start()
|
||||
err := p.cmd.Start()
|
||||
p.process.ops = p
|
||||
p.childPipe.Close()
|
||||
if err != nil {
|
||||
p.process.ops = nil
|
||||
return newSystemError(err)
|
||||
}
|
||||
if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.execSetns(); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
// Save the standard descriptor names before the container process
|
||||
// can potentially move them (e.g., via dup2()). If we don't do this now,
|
||||
// we won't know at checkpoint time which file descriptor to look up.
|
||||
|
@ -213,19 +253,6 @@ func (p *initProcess) start() (err error) {
|
|||
p.manager.Destroy()
|
||||
}
|
||||
}()
|
||||
if p.config.Config.Hooks != nil {
|
||||
s := configs.HookState{
|
||||
Version: p.container.config.Version,
|
||||
ID: p.container.id,
|
||||
Pid: p.pid(),
|
||||
Root: p.config.Config.Rootfs,
|
||||
}
|
||||
for _, hook := range p.config.Config.Hooks.Prestart {
|
||||
if err := hook.Run(s); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := p.createNetworkInterfaces(); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
|
@ -233,35 +260,74 @@ func (p *initProcess) start() (err error) {
|
|||
return newSystemError(err)
|
||||
}
|
||||
var (
|
||||
procSync syncT
|
||||
sentRun bool
|
||||
ierr *genericError
|
||||
procSync syncT
|
||||
sentRun bool
|
||||
sentResume bool
|
||||
ierr *genericError
|
||||
)
|
||||
|
||||
dec := json.NewDecoder(p.parentPipe)
|
||||
loop:
|
||||
for {
|
||||
if err := json.NewDecoder(p.parentPipe).Decode(&procSync); err != nil {
|
||||
if err := dec.Decode(&procSync); err != nil {
|
||||
if err == io.EOF {
|
||||
break loop
|
||||
}
|
||||
return newSystemError(err)
|
||||
}
|
||||
switch procSync.Type {
|
||||
case procStart:
|
||||
break loop
|
||||
case procReady:
|
||||
if err := p.manager.Set(p.config.Config); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
// set oom_score_adj
|
||||
if err := setOomScoreAdj(p.config.Config.OomScoreAdj, p.pid()); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
// call prestart hooks
|
||||
if !p.config.Config.Namespaces.Contains(configs.NEWNS) {
|
||||
if p.config.Config.Hooks != nil {
|
||||
s := configs.HookState{
|
||||
Version: p.container.config.Version,
|
||||
ID: p.container.id,
|
||||
Pid: p.pid(),
|
||||
Root: p.config.Config.Rootfs,
|
||||
}
|
||||
for _, hook := range p.config.Config.Hooks.Prestart {
|
||||
if err := hook.Run(s); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Sync with child.
|
||||
if err := utils.WriteJSON(p.parentPipe, syncT{procRun}); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
sentRun = true
|
||||
case procHooks:
|
||||
if p.config.Config.Hooks != nil {
|
||||
s := configs.HookState{
|
||||
Version: p.container.config.Version,
|
||||
ID: p.container.id,
|
||||
Pid: p.pid(),
|
||||
Root: p.config.Config.Rootfs,
|
||||
}
|
||||
for _, hook := range p.config.Config.Hooks.Prestart {
|
||||
if err := hook.Run(s); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Sync with child.
|
||||
if err := utils.WriteJSON(p.parentPipe, syncT{procResume}); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
sentResume = true
|
||||
case procError:
|
||||
// wait for the child process to fully complete and receive an error message
|
||||
// if one was encountered
|
||||
if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF {
|
||||
if err := dec.Decode(&ierr); err != nil && err != io.EOF {
|
||||
return newSystemError(err)
|
||||
}
|
||||
if ierr != nil {
|
||||
|
@@ -276,6 +342,9 @@ loop:
|
|||
if !sentRun {
|
||||
return newSystemError(fmt.Errorf("could not synchronise with container process"))
|
||||
}
|
||||
if p.config.Config.Namespaces.Contains(configs.NEWNS) && !sentResume {
|
||||
return newSystemError(fmt.Errorf("could not synchronise after executing prestart hooks with container process"))
|
||||
}
|
||||
if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
|
@@ -293,7 +362,7 @@ func (p *initProcess) wait() (*os.ProcessState, error) {
|
|||
return p.cmd.ProcessState, err
|
||||
}
|
||||
// we should kill all processes in the cgroup when init dies if we use the host PID namespace
|
||||
if p.cmd.SysProcAttr.Cloneflags&syscall.CLONE_NEWPID == 0 {
|
||||
if p.sharePidns {
|
||||
killCgroupProcesses(p.manager)
|
||||
}
|
||||
return p.cmd.ProcessState, nil
|
||||
|
|
|
@@ -4,6 +4,7 @@ package libcontainer
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
@@ -26,7 +27,7 @@ const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NOD
|
|||
|
||||
// setupRootfs sets up the devices, mount points, and filesystems for use inside a
|
||||
// new mount namespace.
|
||||
func setupRootfs(config *configs.Config, console *linuxConsole) (err error) {
|
||||
func setupRootfs(config *configs.Config, console *linuxConsole, pipe io.ReadWriter) (err error) {
|
||||
if err := prepareRoot(config); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
|
@@ -59,6 +60,13 @@ func setupRootfs(config *configs.Config, console *linuxConsole) (err error) {
|
|||
return newSystemError(err)
|
||||
}
|
||||
}
|
||||
// Signal the parent to run the pre-start hooks.
|
||||
// The hooks are run after the mounts are set up, but before we switch to the new
|
||||
// root, so that the old root is still available in the hooks for any mount
|
||||
// manipulations.
|
||||
if err := syncParentHooks(pipe); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := syscall.Chdir(config.Rootfs); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
|
@@ -75,6 +83,18 @@ func setupRootfs(config *configs.Config, console *linuxConsole) (err error) {
|
|||
return newSystemError(err)
|
||||
}
|
||||
}
|
||||
// remount dev as ro if specified
|
||||
for _, m := range config.Mounts {
|
||||
if m.Destination == "/dev" {
|
||||
if m.Flags&syscall.MS_RDONLY != 0 {
|
||||
if err := remountReadonly(m.Destination); err != nil {
|
||||
return newSystemError(err)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
// set rootfs ( / ) as readonly
|
||||
if config.Readonlyfs {
|
||||
if err := setReadonly(); err != nil {
|
||||
return newSystemError(err)
|
||||
|
@@ -138,16 +158,6 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error {
|
|||
}
|
||||
}
|
||||
return nil
|
||||
case "devpts":
|
||||
if err := os.MkdirAll(dest, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
return mountPropagate(m, rootfs, mountLabel)
|
||||
case "securityfs":
|
||||
if err := os.MkdirAll(dest, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
return mountPropagate(m, rootfs, mountLabel)
|
||||
case "bind":
|
||||
stat, err := os.Stat(m.Source)
|
||||
if err != nil {
|
||||
|
@@ -253,7 +263,10 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error {
|
|||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unknown mount device %q to %q", m.Device, m.Destination)
|
||||
if err := os.MkdirAll(dest, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
return mountPropagate(m, rootfs, mountLabel)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@@ -308,7 +321,8 @@ func checkMountDestination(rootfs, dest string) error {
|
|||
"/proc/cpuinfo",
|
||||
"/proc/diskstats",
|
||||
"/proc/meminfo",
|
||||
"/proc/stats",
|
||||
"/proc/stat",
|
||||
"/proc/net/dev",
|
||||
}
|
||||
for _, valid := range validDestinations {
|
||||
path, err := filepath.Rel(filepath.Join(rootfs, valid), dest)
|
||||
|
@@ -341,7 +355,7 @@ func setupDevSymlinks(rootfs string) error {
|
|||
// kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink
|
||||
// in /dev if it exists in /proc.
|
||||
if _, err := os.Stat("/proc/kcore"); err == nil {
|
||||
links = append(links, [2]string{"/proc/kcore", "/dev/kcore"})
|
||||
links = append(links, [2]string{"/proc/kcore", "/dev/core"})
|
||||
}
|
||||
for _, link := range links {
|
||||
var (
|
||||
|
@@ -551,7 +565,7 @@ func setupPtmx(config *configs.Config, console *linuxConsole) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func pivotRoot(rootfs, pivotBaseDir string) error {
|
||||
func pivotRoot(rootfs, pivotBaseDir string) (err error) {
|
||||
if pivotBaseDir == "" {
|
||||
pivotBaseDir = "/"
|
||||
}
|
||||
|
@@ -563,6 +577,12 @@ func pivotRoot(rootfs, pivotBaseDir string) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err)
|
||||
}
|
||||
defer func() {
|
||||
errVal := os.Remove(pivotDir)
|
||||
if err == nil {
|
||||
err = errVal
|
||||
}
|
||||
}()
|
||||
if err := syscall.PivotRoot(rootfs, pivotDir); err != nil {
|
||||
return fmt.Errorf("pivot_root %s", err)
|
||||
}
|
||||
|
@@ -581,7 +601,7 @@ func pivotRoot(rootfs, pivotBaseDir string) error {
|
|||
if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
|
||||
return fmt.Errorf("unmount pivot_root dir %s", err)
|
||||
}
|
||||
return os.Remove(pivotDir)
|
||||
return nil
|
||||
}
|
||||
|
||||
func msMoveRoot(rootfs string) error {
|
||||
|
@@ -670,14 +690,18 @@ func remount(m *configs.Mount, rootfs string) error {
|
|||
// of propagation flags.
|
||||
func mountPropagate(m *configs.Mount, rootfs string, mountLabel string) error {
|
||||
var (
|
||||
dest = m.Destination
|
||||
data = label.FormatMountLabel(m.Data, mountLabel)
|
||||
dest = m.Destination
|
||||
data = label.FormatMountLabel(m.Data, mountLabel)
|
||||
flags = m.Flags
|
||||
)
|
||||
if dest == "/dev" {
|
||||
flags &= ^syscall.MS_RDONLY
|
||||
}
|
||||
if !strings.HasPrefix(dest, rootfs) {
|
||||
dest = filepath.Join(rootfs, dest)
|
||||
}
|
||||
|
||||
if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags), data); err != nil {
|
||||
if err := syscall.Mount(m.Source, dest, m.Device, uintptr(flags), data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@@ -1,37 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestCheckMountDestOnProc(t *testing.T) {
|
||||
dest := "/rootfs/proc/"
|
||||
err := checkMountDestination("/rootfs", dest)
|
||||
if err == nil {
|
||||
t.Fatal("destination inside proc should return an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckMountDestInSys(t *testing.T) {
|
||||
dest := "/rootfs//sys/fs/cgroup"
|
||||
err := checkMountDestination("/rootfs", dest)
|
||||
if err != nil {
|
||||
t.Fatal("destination inside /sys should not return an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckMountDestFalsePositive(t *testing.T) {
|
||||
dest := "/rootfs/sysfiles/fs/cgroup"
|
||||
err := checkMountDestination("/rootfs", dest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckMountRoot(t *testing.T) {
|
||||
dest := "/rootfs"
|
||||
err := checkMountDestination("/rootfs", dest)
|
||||
if err == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
|
@@ -1,47 +0,0 @@
|
|||
Name: cat
|
||||
State: R (running)
|
||||
Tgid: 19383
|
||||
Ngid: 0
|
||||
Pid: 19383
|
||||
PPid: 19275
|
||||
TracerPid: 0
|
||||
Uid: 1000 1000 1000 1000
|
||||
Gid: 1000 1000 1000 1000
|
||||
FDSize: 256
|
||||
Groups: 24 25 27 29 30 44 46 102 104 108 111 1000 1001
|
||||
NStgid: 19383
|
||||
NSpid: 19383
|
||||
NSpgid: 19383
|
||||
NSsid: 19275
|
||||
VmPeak: 5944 kB
|
||||
VmSize: 5944 kB
|
||||
VmLck: 0 kB
|
||||
VmPin: 0 kB
|
||||
VmHWM: 744 kB
|
||||
VmRSS: 744 kB
|
||||
VmData: 324 kB
|
||||
VmStk: 136 kB
|
||||
VmExe: 48 kB
|
||||
VmLib: 1776 kB
|
||||
VmPTE: 32 kB
|
||||
VmPMD: 12 kB
|
||||
VmSwap: 0 kB
|
||||
Threads: 1
|
||||
SigQ: 0/30067
|
||||
SigPnd: 0000000000000000
|
||||
ShdPnd: 0000000000000000
|
||||
SigBlk: 0000000000000000
|
||||
SigIgn: 0000000000000080
|
||||
SigCgt: 0000000000000000
|
||||
CapInh: 0000000000000000
|
||||
CapPrm: 0000000000000000
|
||||
CapEff: 0000000000000000
|
||||
CapBnd: 0000003fffffffff
|
||||
CapAmb: 0000000000000000
|
||||
Seccomp: 0
|
||||
Cpus_allowed: f
|
||||
Cpus_allowed_list: 0-3
|
||||
Mems_allowed: 00000000,00000001
|
||||
Mems_allowed_list: 0
|
||||
voluntary_ctxt_switches: 0
|
||||
nonvoluntary_ctxt_switches: 1
|
|
@@ -1,17 +0,0 @@
|
|||
// +build linux,cgo,seccomp
|
||||
|
||||
package seccomp
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestParseStatusFile(t *testing.T) {
|
||||
s, err := parseStatusFile("fixtures/proc_self_status")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, ok := s["Seccomp"]; !ok {
|
||||
|
||||
t.Fatal("expected to find 'Seccomp' in the map but did not.")
|
||||
}
|
||||
}
|
|
@@ -1,477 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package selinux
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
)
|
||||
|
||||
const (
|
||||
Enforcing = 1
|
||||
Permissive = 0
|
||||
Disabled = -1
|
||||
selinuxDir = "/etc/selinux/"
|
||||
selinuxConfig = selinuxDir + "config"
|
||||
selinuxTypeTag = "SELINUXTYPE"
|
||||
selinuxTag = "SELINUX"
|
||||
selinuxPath = "/sys/fs/selinux"
|
||||
xattrNameSelinux = "security.selinux"
|
||||
stRdOnly = 0x01
|
||||
)
|
||||
|
||||
var (
|
||||
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
|
||||
mcsList = make(map[string]bool)
|
||||
selinuxfs = "unknown"
|
||||
selinuxEnabled = false // Stores whether selinux is currently enabled
|
||||
selinuxEnabledChecked = false // Stores whether selinux enablement has been checked or established yet
|
||||
)
|
||||
|
||||
type SELinuxContext map[string]string
|
||||
|
||||
// SetDisabled disables selinux support for the package
|
||||
func SetDisabled() {
|
||||
selinuxEnabled, selinuxEnabledChecked = false, true
|
||||
}
|
||||
|
||||
// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs
|
||||
// filesystem or an empty string if no mountpoint is found. Selinuxfs is
|
||||
// a proc-like pseudo-filesystem that exposes the selinux policy API to
|
||||
// processes. The existence of an selinuxfs mount is used to determine
|
||||
// whether selinux is currently enabled or not.
|
||||
func getSelinuxMountPoint() string {
|
||||
if selinuxfs != "unknown" {
|
||||
return selinuxfs
|
||||
}
|
||||
selinuxfs = ""
|
||||
|
||||
mounts, err := mount.GetMounts()
|
||||
if err != nil {
|
||||
return selinuxfs
|
||||
}
|
||||
for _, mount := range mounts {
|
||||
if mount.Fstype == "selinuxfs" {
|
||||
selinuxfs = mount.Mountpoint
|
||||
break
|
||||
}
|
||||
}
|
||||
if selinuxfs != "" {
|
||||
var buf syscall.Statfs_t
|
||||
syscall.Statfs(selinuxfs, &buf)
|
||||
if (buf.Flags & stRdOnly) == 1 {
|
||||
selinuxfs = ""
|
||||
}
|
||||
}
|
||||
return selinuxfs
|
||||
}
|
||||
|
||||
// SelinuxEnabled returns whether selinux is currently enabled.
|
||||
func SelinuxEnabled() bool {
|
||||
if selinuxEnabledChecked {
|
||||
return selinuxEnabled
|
||||
}
|
||||
selinuxEnabledChecked = true
|
||||
if fs := getSelinuxMountPoint(); fs != "" {
|
||||
if con, _ := Getcon(); con != "kernel" {
|
||||
selinuxEnabled = true
|
||||
}
|
||||
}
|
||||
return selinuxEnabled
|
||||
}
|
||||
|
||||
func readConfig(target string) (value string) {
|
||||
var (
|
||||
val, key string
|
||||
bufin *bufio.Reader
|
||||
)
|
||||
|
||||
in, err := os.Open(selinuxConfig)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
bufin = bufio.NewReader(in)
|
||||
|
||||
for done := false; !done; {
|
||||
var line string
|
||||
if line, err = bufin.ReadString('\n'); err != nil {
|
||||
if err != io.EOF {
|
||||
return ""
|
||||
}
|
||||
done = true
|
||||
}
|
||||
line = strings.TrimSpace(line)
|
||||
if len(line) == 0 {
|
||||
// Skip blank lines
|
||||
continue
|
||||
}
|
||||
if line[0] == ';' || line[0] == '#' {
|
||||
// Skip comments
|
||||
continue
|
||||
}
|
||||
if groups := assignRegex.FindStringSubmatch(line); groups != nil {
|
||||
key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
|
||||
if key == target {
|
||||
return strings.Trim(val, "\"")
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getSELinuxPolicyRoot() string {
|
||||
return selinuxDir + readConfig(selinuxTypeTag)
|
||||
}
|
||||
|
||||
func readCon(name string) (string, error) {
|
||||
var val string
|
||||
|
||||
in, err := os.Open(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
_, err = fmt.Fscanf(in, "%s", &val)
|
||||
return val, err
|
||||
}
|
||||
|
||||
// Setfilecon sets the SELinux label for this path or returns an error.
|
||||
func Setfilecon(path string, scon string) error {
|
||||
return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0)
|
||||
}
|
||||
|
||||
// Getfilecon returns the SELinux label for this path or returns an error.
|
||||
func Getfilecon(path string) (string, error) {
|
||||
con, err := system.Lgetxattr(path, xattrNameSelinux)
|
||||
|
||||
// Trim the NUL byte at the end of the byte buffer, if present.
|
||||
if con[len(con)-1] == '\x00' {
|
||||
con = con[:len(con)-1]
|
||||
}
|
||||
return string(con), err
|
||||
}
|
||||
|
||||
func Setfscreatecon(scon string) error {
|
||||
return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), scon)
|
||||
}
|
||||
|
||||
func Getfscreatecon() (string, error) {
|
||||
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()))
|
||||
}
|
||||
|
||||
// Getcon returns the SELinux label of the current process thread, or an error.
|
||||
func Getcon() (string, error) {
|
||||
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid()))
|
||||
}
|
||||
|
||||
// Getpidcon returns the SELinux label of the given pid, or an error.
|
||||
func Getpidcon(pid int) (string, error) {
|
||||
return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
|
||||
}
|
||||
|
||||
func Getexeccon() (string, error) {
|
||||
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()))
|
||||
}
|
||||
|
||||
func writeCon(name string, val string) error {
|
||||
out, err := os.OpenFile(name, os.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
if val != "" {
|
||||
_, err = out.Write([]byte(val))
|
||||
} else {
|
||||
_, err = out.Write(nil)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func Setexeccon(scon string) error {
|
||||
return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon)
|
||||
}
|
||||
|
||||
func (c SELinuxContext) Get() string {
|
||||
return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
|
||||
}
|
||||
|
||||
func NewContext(scon string) SELinuxContext {
|
||||
c := make(SELinuxContext)
|
||||
|
||||
if len(scon) != 0 {
|
||||
con := strings.SplitN(scon, ":", 4)
|
||||
c["user"] = con[0]
|
||||
c["role"] = con[1]
|
||||
c["type"] = con[2]
|
||||
c["level"] = con[3]
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func ReserveLabel(scon string) {
|
||||
if len(scon) != 0 {
|
||||
con := strings.SplitN(scon, ":", 4)
|
||||
mcsAdd(con[3])
|
||||
}
|
||||
}
|
||||
|
||||
func selinuxEnforcePath() string {
|
||||
return fmt.Sprintf("%s/enforce", selinuxPath)
|
||||
}
|
||||
|
||||
func SelinuxGetEnforce() int {
|
||||
var enforce int
|
||||
|
||||
enforceS, err := readCon(selinuxEnforcePath())
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
enforce, err = strconv.Atoi(string(enforceS))
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
return enforce
|
||||
}
|
||||
|
||||
func SelinuxSetEnforce(mode int) error {
|
||||
return writeCon(selinuxEnforcePath(), fmt.Sprintf("%d", mode))
|
||||
}
|
||||
|
||||
func SelinuxGetEnforceMode() int {
|
||||
switch readConfig(selinuxTag) {
|
||||
case "enforcing":
|
||||
return Enforcing
|
||||
case "permissive":
|
||||
return Permissive
|
||||
}
|
||||
return Disabled
|
||||
}
|
||||
|
||||
func mcsAdd(mcs string) error {
|
||||
if mcsList[mcs] {
|
||||
return fmt.Errorf("MCS Label already exists")
|
||||
}
|
||||
mcsList[mcs] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func mcsDelete(mcs string) {
|
||||
mcsList[mcs] = false
|
||||
}
|
||||
|
||||
func IntToMcs(id int, catRange uint32) string {
|
||||
var (
|
||||
SETSIZE = int(catRange)
|
||||
TIER = SETSIZE
|
||||
ORD = id
|
||||
)
|
||||
|
||||
if id < 1 || id > 523776 {
|
||||
return ""
|
||||
}
|
||||
|
||||
for ORD > TIER {
|
||||
ORD = ORD - TIER
|
||||
TIER -= 1
|
||||
}
|
||||
TIER = SETSIZE - TIER
|
||||
ORD = ORD + TIER
|
||||
return fmt.Sprintf("s0:c%d,c%d", TIER, ORD)
|
||||
}
|
||||
|
||||
func uniqMcs(catRange uint32) string {
|
||||
var (
|
||||
n uint32
|
||||
c1, c2 uint32
|
||||
mcs string
|
||||
)
|
||||
|
||||
for {
|
||||
binary.Read(rand.Reader, binary.LittleEndian, &n)
|
||||
c1 = n % catRange
|
||||
binary.Read(rand.Reader, binary.LittleEndian, &n)
|
||||
c2 = n % catRange
|
||||
if c1 == c2 {
|
||||
continue
|
||||
} else {
|
||||
if c1 > c2 {
|
||||
t := c1
|
||||
c1 = c2
|
||||
c2 = t
|
||||
}
|
||||
}
|
||||
mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
|
||||
if err := mcsAdd(mcs); err != nil {
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
return mcs
|
||||
}
|
||||
|
||||
func FreeLxcContexts(scon string) {
|
||||
if len(scon) != 0 {
|
||||
con := strings.SplitN(scon, ":", 4)
|
||||
mcsDelete(con[3])
|
||||
}
|
||||
}
|
||||
|
||||
func GetLxcContexts() (processLabel string, fileLabel string) {
|
||||
var (
|
||||
val, key string
|
||||
bufin *bufio.Reader
|
||||
)
|
||||
|
||||
if !SelinuxEnabled() {
|
||||
return "", ""
|
||||
}
|
||||
lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot())
|
||||
in, err := os.Open(lxcPath)
|
||||
if err != nil {
|
||||
return "", ""
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
bufin = bufio.NewReader(in)
|
||||
|
||||
for done := false; !done; {
|
||||
var line string
|
||||
if line, err = bufin.ReadString('\n'); err != nil {
|
||||
if err == io.EOF {
|
||||
done = true
|
||||
} else {
|
||||
goto exit
|
||||
}
|
||||
}
|
||||
line = strings.TrimSpace(line)
|
||||
if len(line) == 0 {
|
||||
// Skip blank lines
|
||||
continue
|
||||
}
|
||||
if line[0] == ';' || line[0] == '#' {
|
||||
// Skip comments
|
||||
continue
|
||||
}
|
||||
if groups := assignRegex.FindStringSubmatch(line); groups != nil {
|
||||
key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
|
||||
if key == "process" {
|
||||
processLabel = strings.Trim(val, "\"")
|
||||
}
|
||||
if key == "file" {
|
||||
fileLabel = strings.Trim(val, "\"")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if processLabel == "" || fileLabel == "" {
|
||||
return "", ""
|
||||
}
|
||||
|
||||
exit:
|
||||
// mcs := IntToMcs(os.Getpid(), 1024)
|
||||
mcs := uniqMcs(1024)
|
||||
scon := NewContext(processLabel)
|
||||
scon["level"] = mcs
|
||||
processLabel = scon.Get()
|
||||
scon = NewContext(fileLabel)
|
||||
scon["level"] = mcs
|
||||
fileLabel = scon.Get()
|
||||
return processLabel, fileLabel
|
||||
}
|
||||
|
||||
func SecurityCheckContext(val string) error {
|
||||
return writeCon(fmt.Sprintf("%s.context", selinuxPath), val)
|
||||
}
|
||||
|
||||
func CopyLevel(src, dest string) (string, error) {
|
||||
if src == "" {
|
||||
return "", nil
|
||||
}
|
||||
if err := SecurityCheckContext(src); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := SecurityCheckContext(dest); err != nil {
|
||||
return "", err
|
||||
}
|
||||
scon := NewContext(src)
|
||||
tcon := NewContext(dest)
|
||||
mcsDelete(tcon["level"])
|
||||
mcsAdd(scon["level"])
|
||||
tcon["level"] = scon["level"]
|
||||
return tcon.Get(), nil
|
||||
}
|
||||
|
||||
// Prevent users from relabeling system files
|
||||
func badPrefix(fpath string) error {
|
||||
var badprefixes = []string{"/usr"}
|
||||
|
||||
for _, prefix := range badprefixes {
|
||||
if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) {
|
||||
return fmt.Errorf("Relabeling content in %s is not allowed.", prefix)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Change the fpath file object to the SELinux label scon.
|
||||
// If the fpath is a directory and recurse is true, Chcon will walk the
|
||||
// directory tree setting the label
|
||||
func Chcon(fpath string, scon string, recurse bool) error {
|
||||
if scon == "" {
|
||||
return nil
|
||||
}
|
||||
if err := badPrefix(fpath); err != nil {
|
||||
return err
|
||||
}
|
||||
callback := func(p string, info os.FileInfo, err error) error {
|
||||
return Setfilecon(p, scon)
|
||||
}
|
||||
|
||||
if recurse {
|
||||
return filepath.Walk(fpath, callback)
|
||||
}
|
||||
|
||||
return Setfilecon(fpath, scon)
|
||||
}
|
||||
|
||||
// DupSecOpt takes an SELinux process label and returns security options that
|
||||
// will set the SELinux Type and Level for future container processes
|
||||
func DupSecOpt(src string) []string {
|
||||
if src == "" {
|
||||
return nil
|
||||
}
|
||||
con := NewContext(src)
|
||||
if con["user"] == "" ||
|
||||
con["role"] == "" ||
|
||||
con["type"] == "" ||
|
||||
con["level"] == "" {
|
||||
return nil
|
||||
}
|
||||
return []string{"label:user:" + con["user"],
|
||||
"label:role:" + con["role"],
|
||||
"label:type:" + con["type"],
|
||||
"label:level:" + con["level"]}
|
||||
}
|
||||
|
||||
// DisableSecOpt returns a security opt that can be used to disable SELinux
|
||||
// labeling support for future container processes
|
||||
func DisableSecOpt() []string {
|
||||
return []string{"label:disable"}
|
||||
}
|
|
@@ -1,75 +0,0 @@
|
|||
// +build linux,selinux
|
||||
|
||||
package selinux_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/selinux"
|
||||
)
|
||||
|
||||
func TestSetfilecon(t *testing.T) {
|
||||
if selinux.SelinuxEnabled() {
|
||||
tmp := "selinux_test"
|
||||
out, _ := os.OpenFile(tmp, os.O_WRONLY|os.O_CREATE, 0)
|
||||
out.Close()
|
||||
err := selinux.Setfilecon(tmp, "system_u:object_r:bin_t:s0")
|
||||
if err != nil {
|
||||
t.Log("Setfilecon failed")
|
||||
t.Fatal(err)
|
||||
}
|
||||
os.Remove(tmp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSELinux(t *testing.T) {
|
||||
var (
|
||||
err error
|
||||
plabel, flabel string
|
||||
)
|
||||
|
||||
if selinux.SelinuxEnabled() {
|
||||
t.Log("Enabled")
|
||||
plabel, flabel = selinux.GetLxcContexts()
|
||||
t.Log(plabel)
|
||||
t.Log(flabel)
|
||||
selinux.FreeLxcContexts(plabel)
|
||||
plabel, flabel = selinux.GetLxcContexts()
|
||||
t.Log(plabel)
|
||||
t.Log(flabel)
|
||||
selinux.FreeLxcContexts(plabel)
|
||||
t.Log("getenforce ", selinux.SelinuxGetEnforce())
|
||||
mode := selinux.SelinuxGetEnforceMode()
|
||||
t.Log("getenforcemode ", mode)
|
||||
|
||||
defer selinux.SelinuxSetEnforce(mode)
|
||||
if err := selinux.SelinuxSetEnforce(selinux.Enforcing); err != nil {
|
||||
t.Fatalf("enforcing selinux failed: %v", err)
|
||||
}
|
||||
if err := selinux.SelinuxSetEnforce(selinux.Permissive); err != nil {
|
||||
t.Fatalf("setting selinux mode to permissive failed: %v", err)
|
||||
}
|
||||
selinux.SelinuxSetEnforce(mode)
|
||||
|
||||
pid := os.Getpid()
|
||||
t.Logf("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023))
|
||||
err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0")
|
||||
if err == nil {
|
||||
t.Log(selinux.Getfscreatecon())
|
||||
} else {
|
||||
t.Log("setfscreatecon failed", err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = selinux.Setfscreatecon("")
|
||||
if err == nil {
|
||||
t.Log(selinux.Getfscreatecon())
|
||||
} else {
|
||||
t.Log("setfscreatecon failed", err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log(selinux.Getpidcon(1))
|
||||
} else {
|
||||
t.Log("Disabled")
|
||||
}
|
||||
}
|
|
@@ -3,6 +3,7 @@
|
|||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/apparmor"
|
||||
|
@@ -18,16 +19,22 @@ type linuxSetnsInit struct {
|
|||
config *initConfig
|
||||
}
|
||||
|
||||
func (l *linuxSetnsInit) getSessionRingName() string {
|
||||
return fmt.Sprintf("_ses.%s", l.config.ContainerId)
|
||||
}
|
||||
|
||||
func (l *linuxSetnsInit) Init() error {
|
||||
// do not inherit the parent's session keyring
|
||||
if _, err := keyctl.JoinSessionKeyring("_ses"); err != nil {
|
||||
if _, err := keyctl.JoinSessionKeyring(l.getSessionRingName()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := setupRlimits(l.config.Config); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := setOomScoreAdj(l.config.Config.OomScoreAdj); err != nil {
|
||||
return err
|
||||
if l.config.NoNewPrivileges {
|
||||
if err := system.Prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if l.config.Config.Seccomp != nil {
|
||||
if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil {
|
||||
|
@@ -37,11 +44,11 @@ func (l *linuxSetnsInit) Init() error {
|
|||
if err := finalizeNamespace(l.config); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := apparmor.ApplyProfile(l.config.Config.AppArmorProfile); err != nil {
|
||||
if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
|
||||
return err
|
||||
}
|
||||
if l.config.Config.ProcessLabel != "" {
|
||||
if err := label.SetProcessLabel(l.config.Config.ProcessLabel); err != nil {
|
||||
if l.config.ProcessLabel != "" {
|
||||
if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,27 +0,0 @@
|
|||
package stacktrace
|
||||
|
||||
import "testing"
|
||||
|
||||
func captureFunc() Stacktrace {
|
||||
return Capture(0)
|
||||
}
|
||||
|
||||
func TestCaptureTestFunc(t *testing.T) {
|
||||
stack := captureFunc()
|
||||
|
||||
if len(stack.Frames) == 0 {
|
||||
t.Fatal("expected stack frames to be returned")
|
||||
}
|
||||
|
||||
// the first frame is the caller
|
||||
frame := stack.Frames[0]
|
||||
if expected := "captureFunc"; frame.Function != expected {
|
||||
t.Fatalf("expteced function %q but recevied %q", expected, frame.Function)
|
||||
}
|
||||
if expected := "github.com/opencontainers/runc/libcontainer/stacktrace"; frame.Package != expected {
|
||||
t.Fatalf("expected package %q but received %q", expected, frame.Package)
|
||||
}
|
||||
if expected := "capture_test.go"; frame.File != expected {
|
||||
t.Fatalf("expected file %q but received %q", expected, frame.File)
|
||||
}
|
||||
}
|
|
@@ -1,20 +0,0 @@
|
|||
package stacktrace
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestParsePackageName(t *testing.T) {
|
||||
var (
|
||||
name = "github.com/opencontainers/runc/libcontainer/stacktrace.captureFunc"
|
||||
expectedPackage = "github.com/opencontainers/runc/libcontainer/stacktrace"
|
||||
expectedFunction = "captureFunc"
|
||||
)
|
||||
|
||||
pack, funcName := parseFunctionName(name)
|
||||
if pack != expectedPackage {
|
||||
t.Fatalf("expected package %q but received %q", expectedPackage, pack)
|
||||
}
|
||||
|
||||
if funcName != expectedFunction {
|
||||
t.Fatalf("expected function %q but received %q", expectedFunction, funcName)
|
||||
}
|
||||
}
|
|
@@ -3,6 +3,7 @@
|
|||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
|
@@ -21,23 +22,39 @@ type linuxStandardInit struct {
|
|||
config *initConfig
|
||||
}
|
||||
|
||||
func (l *linuxStandardInit) getSessionRingParams() (string, uint32, uint32) {
|
||||
var newperms uint32
|
||||
|
||||
if l.config.Config.Namespaces.Contains(configs.NEWUSER) {
|
||||
// with user ns we need 'other' search permissions
|
||||
newperms = 0x8
|
||||
} else {
|
||||
// without user ns we need 'UID' search permissions
|
||||
newperms = 0x80000
|
||||
}
|
||||
|
||||
// create a unique per session container name that we can
|
||||
// join in setns; however, other containers can also join it
|
||||
return fmt.Sprintf("_ses.%s", l.config.ContainerId), 0xffffffff, newperms
|
||||
}
|
||||
|
||||
// PR_SET_NO_NEW_PRIVS isn't exposed in Golang so we define it ourselves copying the value
|
||||
// from the kernel
|
||||
const PR_SET_NO_NEW_PRIVS = 0x26
|
||||
|
||||
func (l *linuxStandardInit) Init() error {
|
||||
ringname, keepperms, newperms := l.getSessionRingParams()
|
||||
|
||||
// do not inherit the parent's session keyring
|
||||
sessKeyId, err := keyctl.JoinSessionKeyring("")
|
||||
sessKeyId, err := keyctl.JoinSessionKeyring(ringname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// make session keyring searchable
|
||||
// without user ns we need 'UID' search permissions
|
||||
// with user ns we need 'other' search permissions
|
||||
if err := keyctl.ModKeyringPerm(sessKeyId, 0xffffffff, 0x080008); err != nil {
|
||||
if err := keyctl.ModKeyringPerm(sessKeyId, keepperms, newperms); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// join any namespaces via a path to the namespace fd if provided
|
||||
if err := joinExistingNamespaces(l.config.Config.Namespaces); err != nil {
|
||||
return err
|
||||
}
|
||||
var console *linuxConsole
|
||||
if l.config.Console != "" {
|
||||
console = newConsoleFromPath(l.config.Console)
|
||||
|
@@ -45,9 +62,6 @@ func (l *linuxStandardInit) Init() error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if _, err := syscall.Setsid(); err != nil {
|
||||
return err
|
||||
}
|
||||
if console != nil {
|
||||
if err := system.Setctty(); err != nil {
|
||||
return err
|
||||
|
@@ -62,13 +76,11 @@ func (l *linuxStandardInit) Init() error {
|
|||
if err := setupRlimits(l.config.Config); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := setOomScoreAdj(l.config.Config.OomScoreAdj); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
label.Init()
|
||||
// InitializeMountNamespace() can be executed only for a new mount namespace
|
||||
if l.config.Config.Namespaces.Contains(configs.NEWNS) {
|
||||
if err := setupRootfs(l.config.Config, console); err != nil {
|
||||
if err := setupRootfs(l.config.Config, console, l.pipe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@@ -77,10 +89,10 @@ func (l *linuxStandardInit) Init() error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := apparmor.ApplyProfile(l.config.Config.AppArmorProfile); err != nil {
|
||||
if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := label.SetProcessLabel(l.config.Config.ProcessLabel); err != nil {
|
||||
if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@@ -103,6 +115,11 @@ func (l *linuxStandardInit) Init() error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if l.config.NoNewPrivileges {
|
||||
if err := system.Prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Tell our parent that we're ready to Execv. This must be done before the
|
||||
// Seccomp rules have been applied, because we need to be able to read and
|
||||
// write to a socket.
|
||||
|
@@ -128,5 +145,6 @@ func (l *linuxStandardInit) Init() error {
|
|||
if syscall.Getppid() != l.parentPid {
|
||||
return syscall.Kill(syscall.Getpid(), syscall.SIGKILL)
|
||||
}
|
||||
|
||||
return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ())
|
||||
}
|
||||
|
|
|
@@ -1,79 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestStateStatus(t *testing.T) {
|
||||
states := map[containerState]Status{
|
||||
&stoppedState{}: Destroyed,
|
||||
&runningState{}: Running,
|
||||
&restoredState{}: Running,
|
||||
&pausedState{}: Paused,
|
||||
}
|
||||
for s, status := range states {
|
||||
if s.status() != status {
|
||||
t.Fatalf("state returned %s but expected %s", s.status(), status)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isStateTransitionError(err error) bool {
|
||||
_, ok := err.(*stateTransitionError)
|
||||
return ok
|
||||
}
|
||||
|
||||
func TestStoppedStateTransition(t *testing.T) {
|
||||
s := &stoppedState{c: &linuxContainer{}}
|
||||
valid := []containerState{
|
||||
&stoppedState{},
|
||||
&runningState{},
|
||||
&restoredState{},
|
||||
}
|
||||
for _, v := range valid {
|
||||
if err := s.transition(v); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
err := s.transition(&pausedState{})
|
||||
if err == nil {
|
||||
t.Fatal("transition to paused state should fail")
|
||||
}
|
||||
if !isStateTransitionError(err) {
|
||||
t.Fatal("expected stateTransitionError")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPausedStateTransition(t *testing.T) {
|
||||
s := &pausedState{c: &linuxContainer{}}
|
||||
valid := []containerState{
|
||||
&pausedState{},
|
||||
&runningState{},
|
||||
&stoppedState{},
|
||||
}
|
||||
for _, v := range valid {
|
||||
if err := s.transition(v); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRestoredStateTransition(t *testing.T) {
|
||||
s := &restoredState{c: &linuxContainer{}}
|
||||
valid := []containerState{
|
||||
&stoppedState{},
|
||||
&runningState{},
|
||||
}
|
||||
for _, v := range valid {
|
||||
if err := s.transition(v); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
err := s.transition(&createdState{})
|
||||
if err == nil {
|
||||
t.Fatal("transition to created state should fail")
|
||||
}
|
||||
if !isStateTransitionError(err) {
|
||||
t.Fatal("expected stateTransitionError")
|
||||
}
|
||||
}
|
|
@@ -112,3 +112,11 @@ func RunningInUserNS() bool {
|
|||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
|
||||
_, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
|
@@ -1,472 +0,0 @@
|
|||
package user
|
||||
|
||||
import (
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestUserParseLine(t *testing.T) {
|
||||
var (
|
||||
a, b string
|
||||
c []string
|
||||
d int
|
||||
)
|
||||
|
||||
parseLine("", &a, &b)
|
||||
if a != "" || b != "" {
|
||||
t.Fatalf("a and b should be empty ('%v', '%v')", a, b)
|
||||
}
|
||||
|
||||
parseLine("a", &a, &b)
|
||||
if a != "a" || b != "" {
|
||||
t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b)
|
||||
}
|
||||
|
||||
parseLine("bad boys:corny cows", &a, &b)
|
||||
if a != "bad boys" || b != "corny cows" {
|
||||
t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b)
|
||||
}
|
||||
|
||||
parseLine("", &c)
|
||||
if len(c) != 0 {
|
||||
t.Fatalf("c should be empty (%#v)", c)
|
||||
}
|
||||
|
||||
parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c)
|
||||
if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" {
|
||||
t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c)
|
||||
}
|
||||
|
||||
parseLine("::::::::::", &a, &b, &c)
|
||||
if a != "" || b != "" || len(c) != 0 {
|
||||
t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c)
|
||||
}
|
||||
|
||||
parseLine("not a number", &d)
|
||||
if d != 0 {
|
||||
t.Fatalf("d should be 0 (%v)", d)
|
||||
}
|
||||
|
||||
parseLine("b:12:c", &a, &d, &b)
|
||||
if a != "b" || b != "c" || d != 12 {
|
||||
t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', %v)", a, b, d)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUserParsePasswd(t *testing.T) {
|
||||
users, err := ParsePasswdFilter(strings.NewReader(`
|
||||
root:x:0:0:root:/root:/bin/bash
|
||||
adm:x:3:4:adm:/var/adm:/bin/false
|
||||
this is just some garbage data
|
||||
`), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
if len(users) != 3 {
|
||||
t.Fatalf("Expected 3 users, got %v", len(users))
|
||||
}
|
||||
if users[0].Uid != 0 || users[0].Name != "root" {
|
||||
t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name)
|
||||
}
|
||||
if users[1].Uid != 3 || users[1].Name != "adm" {
|
||||
t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUserParseGroup(t *testing.T) {
|
||||
groups, err := ParseGroupFilter(strings.NewReader(`
|
||||
root:x:0:root
|
||||
adm:x:4:root,adm,daemon
|
||||
this is just some garbage data
|
||||
`), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
if len(groups) != 3 {
|
||||
t.Fatalf("Expected 3 groups, got %v", len(groups))
|
||||
}
|
||||
if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 {
|
||||
t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List))
|
||||
}
|
||||
if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 {
|
||||
t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List))
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidGetExecUser(t *testing.T) {
|
||||
const passwdContent = `
|
||||
root:x:0:0:root user:/root:/bin/bash
|
||||
adm:x:42:43:adm:/var/adm:/bin/false
|
||||
this is just some garbage data
|
||||
`
|
||||
const groupContent = `
|
||||
root:x:0:root
|
||||
adm:x:43:
|
||||
grp:x:1234:root,adm
|
||||
this is just some garbage data
|
||||
`
|
||||
defaultExecUser := ExecUser{
|
||||
Uid: 8888,
|
||||
Gid: 8888,
|
||||
Sgids: []int{8888},
|
||||
Home: "/8888",
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
ref string
|
||||
expected ExecUser
|
||||
}{
|
||||
{
|
||||
ref: "root",
|
||||
expected: ExecUser{
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
Sgids: []int{0, 1234},
|
||||
Home: "/root",
|
||||
},
|
||||
},
|
||||
{
|
||||
ref: "adm",
|
||||
expected: ExecUser{
|
||||
Uid: 42,
|
||||
Gid: 43,
|
||||
Sgids: []int{1234},
|
||||
Home: "/var/adm",
|
||||
},
|
||||
},
|
||||
{
|
||||
ref: "root:adm",
|
||||
expected: ExecUser{
|
||||
Uid: 0,
|
||||
Gid: 43,
|
||||
Sgids: defaultExecUser.Sgids,
|
||||
Home: "/root",
|
||||
},
|
||||
},
|
||||
{
|
||||
ref: "adm:1234",
|
||||
expected: ExecUser{
|
||||
Uid: 42,
|
||||
Gid: 1234,
|
||||
Sgids: defaultExecUser.Sgids,
|
||||
Home: "/var/adm",
|
||||
},
|
||||
},
|
||||
{
|
||||
ref: "42:1234",
|
||||
expected: ExecUser{
|
||||
Uid: 42,
|
||||
Gid: 1234,
|
||||
Sgids: defaultExecUser.Sgids,
|
||||
Home: "/var/adm",
|
||||
},
|
||||
},
|
||||
{
|
||||
ref: "1337:1234",
|
||||
expected: ExecUser{
|
||||
Uid: 1337,
|
||||
Gid: 1234,
|
||||
Sgids: defaultExecUser.Sgids,
|
||||
Home: defaultExecUser.Home,
|
||||
},
|
||||
},
|
||||
{
|
||||
ref: "1337",
|
||||
expected: ExecUser{
|
||||
Uid: 1337,
|
||||
Gid: defaultExecUser.Gid,
|
||||
Sgids: defaultExecUser.Sgids,
|
||||
Home: defaultExecUser.Home,
|
||||
},
|
||||
},
|
||||
{
|
||||
ref: "",
|
||||
expected: ExecUser{
|
||||
Uid: defaultExecUser.Uid,
|
||||
Gid: defaultExecUser.Gid,
|
||||
Sgids: defaultExecUser.Sgids,
|
||||
Home: defaultExecUser.Home,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
passwd := strings.NewReader(passwdContent)
|
||||
group := strings.NewReader(groupContent)
|
||||
|
||||
execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group)
|
||||
if err != nil {
|
||||
t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error())
|
||||
t.Fail()
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(test.expected, *execUser) {
|
||||
t.Logf("got: %#v", execUser)
|
||||
t.Logf("expected: %#v", test.expected)
|
||||
t.Fail()
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidGetExecUser(t *testing.T) {
|
||||
const passwdContent = `
|
||||
root:x:0:0:root user:/root:/bin/bash
|
||||
adm:x:42:43:adm:/var/adm:/bin/false
|
||||
this is just some garbage data
|
||||
`
|
||||
const groupContent = `
|
||||
root:x:0:root
|
||||
adm:x:43:
|
||||
grp:x:1234:root,adm
|
||||
this is just some garbage data
|
||||
`
|
||||
|
||||
tests := []string{
|
||||
// No such user/group.
|
||||
"notuser",
|
||||
"notuser:notgroup",
|
||||
"root:notgroup",
|
||||
"notuser:adm",
|
||||
"8888:notgroup",
|
||||
"notuser:8888",
|
||||
|
||||
// Invalid user/group values.
|
||||
"-1:0",
|
||||
"0:-3",
|
||||
"-5:-2",
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
passwd := strings.NewReader(passwdContent)
|
||||
group := strings.NewReader(groupContent)
|
||||
|
||||
execUser, err := GetExecUser(test, nil, passwd, group)
|
||||
if err == nil {
|
||||
t.Logf("got unexpected success when parsing '%s': %#v", test, execUser)
|
||||
t.Fail()
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetExecUserNilSources(t *testing.T) {
|
||||
const passwdContent = `
|
||||
root:x:0:0:root user:/root:/bin/bash
|
||||
adm:x:42:43:adm:/var/adm:/bin/false
|
||||
this is just some garbage data
|
||||
`
|
||||
const groupContent = `
|
||||
root:x:0:root
|
||||
adm:x:43:
|
||||
grp:x:1234:root,adm
|
||||
this is just some garbage data
|
||||
`
|
||||
|
||||
defaultExecUser := ExecUser{
|
||||
Uid: 8888,
|
||||
Gid: 8888,
|
||||
Sgids: []int{8888},
|
||||
Home: "/8888",
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
ref string
|
||||
passwd, group bool
|
||||
expected ExecUser
|
||||
}{
|
||||
{
|
||||
ref: "",
|
||||
passwd: false,
|
||||
group: false,
|
||||
expected: ExecUser{
|
||||
Uid: 8888,
|
||||
Gid: 8888,
|
||||
Sgids: []int{8888},
|
||||
Home: "/8888",
|
||||
},
|
||||
},
|
||||
{
|
||||
ref: "root",
|
||||
passwd: true,
|
||||
group: false,
|
||||
expected: ExecUser{
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
Sgids: []int{8888},
|
||||
Home: "/root",
|
||||
},
|
||||
},
|
||||
{
|
||||
ref: "0",
|
||||
passwd: false,
|
||||
group: false,
|
||||
expected: ExecUser{
|
||||
Uid: 0,
|
||||
Gid: 8888,
|
||||
Sgids: []int{8888},
|
||||
Home: "/8888",
|
||||
},
|
||||
},
|
||||
{
|
||||
ref: "0:0",
|
||||
passwd: false,
|
||||
group: false,
|
||||
expected: ExecUser{
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
Sgids: []int{8888},
|
||||
Home: "/8888",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
var passwd, group io.Reader
|
||||
|
||||
if test.passwd {
|
||||
passwd = strings.NewReader(passwdContent)
|
||||
}
|
||||
|
||||
if test.group {
|
||||
group = strings.NewReader(groupContent)
|
||||
}
|
||||
|
||||
execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group)
|
||||
if err != nil {
|
||||
t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error())
|
||||
t.Fail()
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(test.expected, *execUser) {
|
||||
t.Logf("got: %#v", execUser)
|
||||
t.Logf("expected: %#v", test.expected)
|
||||
t.Fail()
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAdditionalGroups(t *testing.T) {
|
||||
const groupContent = `
|
||||
root:x:0:root
|
||||
adm:x:43:
|
||||
grp:x:1234:root,adm
|
||||
adm:x:4343:root,adm-duplicate
|
||||
this is just some garbage data
|
||||
`
|
||||
tests := []struct {
|
||||
groups []string
|
||||
expected []int
|
||||
hasError bool
|
||||
}{
|
||||
{
|
||||
// empty group
|
||||
groups: []string{},
|
||||
expected: []int{},
|
||||
},
|
||||
{
|
||||
// single group
|
||||
groups: []string{"adm"},
|
||||
expected: []int{43},
|
||||
},
|
||||
{
|
||||
// multiple groups
|
||||
groups: []string{"adm", "grp"},
|
||||
expected: []int{43, 1234},
|
||||
},
|
||||
{
|
||||
// invalid group
|
||||
groups: []string{"adm", "grp", "not-exist"},
|
||||
expected: nil,
|
||||
hasError: true,
|
||||
},
|
||||
{
|
||||
// group with numeric id
|
||||
groups: []string{"43"},
|
||||
expected: []int{43},
|
||||
},
|
||||
{
|
||||
// group with unknown numeric id
|
||||
groups: []string{"adm", "10001"},
|
||||
expected: []int{43, 10001},
|
||||
},
|
||||
{
|
||||
// groups specified twice with numeric and name
|
||||
groups: []string{"adm", "43"},
|
||||
expected: []int{43},
|
||||
},
|
||||
{
|
||||
// groups with too small id
|
||||
groups: []string{"-1"},
|
||||
expected: nil,
|
||||
hasError: true,
|
||||
},
|
||||
{
|
||||
// groups with too large id
|
||||
groups: []string{strconv.Itoa(1 << 31)},
|
||||
expected: nil,
|
||||
hasError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
group := strings.NewReader(groupContent)
|
||||
|
||||
gids, err := GetAdditionalGroups(test.groups, group)
|
||||
if test.hasError && err == nil {
|
||||
t.Errorf("Parse(%#v) expects error but has none", test)
|
||||
continue
|
||||
}
|
||||
if !test.hasError && err != nil {
|
||||
t.Errorf("Parse(%#v) has error %v", test, err)
|
||||
continue
|
||||
}
|
||||
sort.Sort(sort.IntSlice(gids))
|
||||
if !reflect.DeepEqual(gids, test.expected) {
|
||||
t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAdditionalGroupsNumeric(t *testing.T) {
|
||||
tests := []struct {
|
||||
groups []string
|
||||
expected []int
|
||||
hasError bool
|
||||
}{
|
||||
{
|
||||
// numeric groups only
|
||||
groups: []string{"1234", "5678"},
|
||||
expected: []int{1234, 5678},
|
||||
},
|
||||
{
|
||||
// numeric and alphabetic
|
||||
groups: []string{"1234", "fake"},
|
||||
expected: nil,
|
||||
hasError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
gids, err := GetAdditionalGroups(test.groups, nil)
|
||||
if test.hasError && err == nil {
|
||||
t.Errorf("Parse(%#v) expects error but has none", test)
|
||||
continue
|
||||
}
|
||||
if !test.hasError && err != nil {
|
||||
t.Errorf("Parse(%#v) has error %v", test, err)
|
||||
continue
|
||||
}
|
||||
sort.Sort(sort.IntSlice(gids))
|
||||
if !reflect.DeepEqual(gids, test.expected) {
|
||||
t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups)
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -63,6 +63,11 @@ func WriteJSON(w io.Writer, v interface{}) error {
|
|||
// be a subdirectory of the prefixed path. This is all done lexically, so paths
|
||||
// that include symlinks won't be safe as a result of using CleanPath.
|
||||
func CleanPath(path string) string {
|
||||
// Deal with empty strings nicely.
|
||||
if path == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Ensure that all paths are cleaned (especially problematic ones like
|
||||
// "/../../../../../" which can cause lots of issues).
|
||||
path = filepath.Clean(path)
|
||||
|
|
|
@@ -1,25 +0,0 @@
|
|||
package utils
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestGenerateName(t *testing.T) {
|
||||
name, err := GenerateRandomName("veth", 5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected := 5 + len("veth")
|
||||
if len(name) != expected {
|
||||
t.Fatalf("expected name to be %d chars but received %d", expected, len(name))
|
||||
}
|
||||
|
||||
name, err = GenerateRandomName("veth", 65)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected = 64 + len("veth")
|
||||
if len(name) != expected {
|
||||
t.Fatalf("expected name to be %d chars but received %d", expected, len(name))
|
||||
}
|
||||
}
|
|
@@ -1,8 +0,0 @@
|
|||
package xattr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
var ErrNotSupportedPlatform = fmt.Errorf("platform and architecture is not supported %s %s", runtime.GOOS, runtime.GOARCH)
|
|
@@ -1,53 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package xattr
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
)
|
||||
|
||||
func XattrEnabled(path string) bool {
|
||||
if Setxattr(path, "user.test", "") == syscall.ENOTSUP {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func stringsfromByte(buf []byte) (result []string) {
|
||||
offset := 0
|
||||
for index, b := range buf {
|
||||
if b == 0 {
|
||||
result = append(result, string(buf[offset:index]))
|
||||
offset = index + 1
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func Listxattr(path string) ([]string, error) {
|
||||
size, err := system.Llistxattr(path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf := make([]byte, size)
|
||||
read, err := system.Llistxattr(path, buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names := stringsfromByte(buf[:read])
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func Getxattr(path, attr string) (string, error) {
|
||||
value, err := system.Lgetxattr(path, attr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(value), nil
|
||||
}
|
||||
|
||||
func Setxattr(path, xattr, value string) error {
|
||||
return system.Lsetxattr(path, xattr, []byte(value), 0)
|
||||
}
|
|
@@ -1,78 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package xattr_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/xattr"
|
||||
)
|
||||
|
||||
func TestXattr(t *testing.T) {
|
||||
tmp := "xattr_test"
|
||||
out, err := os.OpenFile(tmp, os.O_WRONLY|os.O_CREATE, 0)
|
||||
if err != nil {
|
||||
t.Fatal("failed")
|
||||
}
|
||||
defer os.Remove(tmp)
|
||||
attr := "user.test"
|
||||
out.Close()
|
||||
|
||||
if !xattr.XattrEnabled(tmp) {
|
||||
t.Log("Disabled")
|
||||
t.Fatal("failed")
|
||||
}
|
||||
t.Log("Success")
|
||||
|
||||
err = xattr.Setxattr(tmp, attr, "test")
|
||||
if err != nil {
|
||||
t.Fatal("failed")
|
||||
}
|
||||
|
||||
var value string
|
||||
value, err = xattr.Getxattr(tmp, attr)
|
||||
if err != nil {
|
||||
t.Fatal("failed")
|
||||
}
|
||||
if value != "test" {
|
||||
t.Fatal("failed")
|
||||
}
|
||||
t.Log("Success")
|
||||
|
||||
var names []string
|
||||
names, err = xattr.Listxattr(tmp)
|
||||
if err != nil {
|
||||
t.Fatal("failed")
|
||||
}
|
||||
|
||||
var found int
|
||||
for _, name := range names {
|
||||
if name == attr {
|
||||
found = 1
|
||||
}
|
||||
}
|
||||
// Listxattr doesn't return trusted.* and system.* namespace
|
||||
// attrs when run in unprivileged mode.
|
||||
if found != 1 {
|
||||
t.Fatal("failed")
|
||||
}
|
||||
t.Log("Success")
|
||||
|
||||
big := "0000000000000000000000000000000000000000000000000000000000000000000008c6419ad822dfe29283fb3ac98dcc5908810cb31f4cfe690040c42c144b7492eicompslf20dxmlpgz"
|
||||
// Test for long xattrs larger than 128 bytes
|
||||
err = xattr.Setxattr(tmp, attr, big)
|
||||
if err != nil {
|
||||
t.Fatal("failed to add long value")
|
||||
}
|
||||
value, err = xattr.Getxattr(tmp, attr)
|
||||
if err != nil {
|
||||
t.Fatal("failed to get long value")
|
||||
}
|
||||
t.Log("Success")
|
||||
|
||||
if value != big {
|
||||
t.Fatal("failed, value doesn't match")
|
||||
}
|
||||
t.Log("Success")
|
||||
}
|
|
@@ -1,15 +0,0 @@
|
|||
// +build !linux
|
||||
|
||||
package xattr
|
||||
|
||||
func Listxattr(path string) ([]string, error) {
|
||||
return nil, ErrNotSupportedPlatform
|
||||
}
|
||||
|
||||
func Getxattr(path, attr string) (string, error) {
|
||||
return "", ErrNotSupportedPlatform
|
||||
}
|
||||
|
||||
func Setxattr(path, xattr, value string) error {
|
||||
return ErrNotSupportedPlatform
|
||||
}
|
|
@@ -1,71 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
)
|
||||
|
||||
var listCommand = cli.Command{
|
||||
Name: "list",
|
||||
Usage: "lists containers started by runc with the given root",
|
||||
Action: func(context *cli.Context) {
|
||||
factory, err := loadFactory(context)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
// get the list of containers
|
||||
root := context.GlobalString("root")
|
||||
absRoot, err := filepath.Abs(root)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
list, err := ioutil.ReadDir(absRoot)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
w := tabwriter.NewWriter(os.Stdout, 12, 1, 3, ' ', 0)
|
||||
fmt.Fprint(w, "ID\tPID\tSTATUS\tCREATED\n")
|
||||
// output containers
|
||||
for _, item := range list {
|
||||
if item.IsDir() {
|
||||
if err := outputListInfo(item.Name(), factory, w); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := w.Flush(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func outputListInfo(id string, factory libcontainer.Factory, w *tabwriter.Writer) error {
|
||||
container, err := factory.Load(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
containerStatus, err := container.Status()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
state, err := container.State()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(w, "%s\t%d\t%s\t%s\n",
|
||||
container.ID(),
|
||||
state.BaseState.InitProcessPid,
|
||||
containerStatus.String(),
|
||||
state.BaseState.Created.Format(time.RFC3339Nano))
|
||||
return nil
|
||||
}
|
103
vendor/src/github.com/opencontainers/runc/main.go
vendored
|
@@ -1,103 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/opencontainers/specs"
|
||||
)
|
||||
|
||||
const (
|
||||
version = "0.0.8"
|
||||
specConfig = "config.json"
|
||||
usage = `Open Container Initiative runtime
|
||||
|
||||
runc is a command line client for running applications packaged according to
|
||||
the Open Container Format (OCF) and is a compliant implementation of the
|
||||
Open Container Initiative specification.
|
||||
|
||||
runc integrates well with existing process supervisors to provide a production
|
||||
container runtime environment for applications. It can be used with your
|
||||
existing process monitoring tools and the container will be spawned as a
|
||||
direct child of the process supervisor.
|
||||
|
||||
After creating config files for your root filesystem with runc, you can execute
|
||||
a container in your shell by running:
|
||||
|
||||
# cd /mycontainer
|
||||
# runc start [ -b bundle ] <container-id>
|
||||
|
||||
If not specified, the default value for the 'bundle' is the current directory.
|
||||
'Bundle' is the directory where '` + specConfig + `' must be located.`
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "runc"
|
||||
app.Usage = usage
|
||||
app.Version = fmt.Sprintf("%s\nspec version %s", version, specs.Version)
|
||||
app.Flags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "enable debug output for logging",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "log",
|
||||
Usage: "set the log file path where internal debug information is written",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "log-format",
|
||||
Value: "text",
|
||||
Usage: "set the format used by logs ('text' (default), or 'json')",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "root",
|
||||
Value: specs.LinuxStateDirectory,
|
||||
Usage: "root directory for storage of container state (this should be located in tmpfs)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "criu",
|
||||
Value: "criu",
|
||||
Usage: "path to the criu binary used for checkpoint and restore",
|
||||
},
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
checkpointCommand,
|
||||
deleteCommand,
|
||||
eventsCommand,
|
||||
execCommand,
|
||||
killCommand,
|
||||
listCommand,
|
||||
pauseCommand,
|
||||
restoreCommand,
|
||||
resumeCommand,
|
||||
specCommand,
|
||||
startCommand,
|
||||
}
|
||||
app.Before = func(context *cli.Context) error {
|
||||
if context.GlobalBool("debug") {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
if path := context.GlobalString("log"); path != "" {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.SetOutput(f)
|
||||
}
|
||||
switch context.GlobalString("log-format") {
|
||||
case "text":
|
||||
// retain logrus's default.
|
||||
case "json":
|
||||
logrus.SetFormatter(new(logrus.JSONFormatter))
|
||||
default:
|
||||
logrus.Fatalf("unknown log-format %q", context.GlobalString("log-format"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
|
@@ -1,5 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package main
|
||||
|
||||
import _ "github.com/opencontainers/runc/libcontainer/nsenter"
|
|
@@ -1,20 +0,0 @@
|
|||
// +build !linux
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
checkpointCommand cli.Command
|
||||
eventsCommand cli.Command
|
||||
restoreCommand cli.Command
|
||||
specCommand cli.Command
|
||||
killCommand cli.Command
|
||||
)
|
||||
|
||||
func runAction(*cli.Context) {
|
||||
logrus.Fatal("Current OS is not supported yet")
|
||||
}
|
Some files were not shown because too many files have changed in this diff