Compare commits

...

102 Commits

Author SHA1 Message Date
Mrunal Patel 01826ef50b
Merge pull request #1194 from runcom/bump-v1.0.7
Bump v1.0.7
2017-11-30 12:53:46 -10:00
Antonio Murdaca a5dfa11498
version: bump to v1.0.8
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-30 22:13:18 +01:00
Antonio Murdaca 7a3e37e253
version: bump to v1.0.7
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-30 22:10:57 +01:00
Mrunal Patel eb8df95183
Merge pull request #1187 from runcom/fixups-env-1.7
[release-1.0] Fix env handling on exec
2017-11-30 08:54:50 -10:00
Antonio Murdaca 762cb4cca5
test: add exec/execsync env conflict test
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-30 12:54:38 +01:00
Antonio Murdaca cc0f78dfc4
container_create: correctly set image and kube envs
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-30 12:00:42 +01:00
Antonio Murdaca e4470612a2
oci: do not append conmon env to container process
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-30 11:25:31 +01:00
Antonio Murdaca 3a36f553a4
container_exec: use process file with runc exec
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-30 11:25:23 +01:00
Daniel J Walsh c30571b431
Merge pull request #1182 from runcom/fix-image-pull-v1
[release-1.0] Fix image pull
2017-11-28 09:48:36 -05:00
Antonio Murdaca 327c673182
version: bump to v1.0.7-dev
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-28 13:01:34 +01:00
Antonio Murdaca f19b368f70
version: bump to v1.0.6
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-28 13:01:29 +01:00
Antonio Murdaca dd571523ec
image_pull: fix image resolver
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-28 13:01:11 +01:00
Mrunal Patel 3078f59068
Merge pull request #1175 from runcom/fix-cve-1.0
[release-1.0] Add /proc/scsi to masked paths
2017-11-22 08:34:30 -10:00
Antonio Murdaca bfc061b1c4
version: bump v1.0.6-dev
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-22 12:29:59 +01:00
Antonio Murdaca 665895770a
version: bump v1.0.5
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-22 12:29:41 +01:00
Antonio Murdaca 56760df738
Add /proc/scsi to masked paths
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-22 12:28:18 +01:00
Mrunal Patel 4fa5505a7c
Merge pull request #1143 from runcom/bump-v1.0.4
Bump v1.0.4
2017-11-12 06:02:10 -10:00
Antonio Murdaca d45e90673f
version: bump to v1.0.5-dev
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-11 14:05:44 +01:00
Antonio Murdaca 4aceedee21
version: bump to v1.0.4
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-11 14:05:44 +01:00
Antonio Murdaca 8bdb7b912d
container_list: guard against list filter being nil
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-11 14:05:44 +01:00
Antonio Murdaca bc4319c7a8
*: add crictl.yaml
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-11 14:05:40 +01:00
Antonio Murdaca ed34ff3255
server: validate labels size to avoid dos
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-11 12:14:07 +01:00
Mrunal Patel ce319adcfe
Merge pull request #1140 from runcom/backports-image-pull-metrics
[release-1.0] Backports image pull fix and metrics
2017-11-10 16:01:28 -10:00
Antonio Murdaca 12cb424833
server: add prometheus metrics for CRI operations
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-10 14:22:13 +01:00
Antonio Murdaca 5488bfeb9e
image_pull: repull when image ID (config digest) changed
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-10 14:22:10 +01:00
Daniel J Walsh 8a39d94a0d
Merge pull request #1134 from runcom/fix-cve-2017-14992
[release-1.0] vendor.conf: update vbatts/tar-split to v0.10.2
2017-11-09 09:23:31 -05:00
Antonio Murdaca dc8d1e9383
version: bump to v1.0.4-dev
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-09 11:59:37 +01:00
Antonio Murdaca 17bcfb495c
version: bump v1.0.3
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-09 11:59:18 +01:00
Antonio Murdaca b63b96722c
vendor.conf: update vbatts/tar-split to v0.10.2
Fix CVE-2017-14992

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-09 11:50:36 +01:00
Mrunal Patel 66586603b9
Merge pull request #1121 from runcom/carry-logs-fix-v1
[release-1.0] test: Fix partial log line parsing
2017-11-06 08:01:30 -08:00
Mrunal Patel b0da62fcb0
test: Fix partial log line parsing
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-06 12:41:16 +01:00
Daniel J Walsh 00eca36d8e
Merge pull request #1118 from runcom/setup-cwd-v1
[release-1.0] container_create: setup cwd for containers
2017-11-04 07:37:11 -04:00
Antonio Murdaca 762827be57
container_create: setup cwd for containers
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-03 19:21:24 +01:00
Antonio Murdaca f4afea6480
Merge pull request #1113 from mrunalp/env_fix
Add HOSTNAME env var to container
2017-11-03 08:57:39 +01:00
Mrunal Patel 06b4946e68 travis: Take out make lint for go tip
It is failing and our source can't be compatible with stable and tip
at the same time.

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
2017-11-02 20:23:50 -07:00
Mrunal Patel 894c98bde8 test: Add a test for HOSTNAME env
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
2017-11-02 20:23:45 -07:00
Mrunal Patel 7e1e21f1f4 Add HOSTNAME env var to container
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
2017-11-02 10:21:01 -07:00
Mrunal Patel f6c2df62c9
Merge pull request #1105 from runcom/1100-carry-v1
[release-1.0] Report error when arguments given to crio command
2017-11-01 15:34:13 -07:00
Mrunal Patel 0eebfa6671
Merge pull request #1107 from runcom/add-dot-github-v1
[release-1.0] *: add .github
2017-11-01 14:23:32 -07:00
Antonio Murdaca 430ff2b01d
*: add .github
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-01 20:20:34 +01:00
Daniel J Walsh 222216256f
Report error when arguments given to crio command
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2017-11-01 19:26:24 +01:00
Daniel J Walsh d54ad94656
Merge pull request #1099 from runcom/makefile-fixes-v1
[release-1.0] Makefile: output binaries under bin/
2017-10-31 09:03:41 -04:00
Mrunal Patel db9d8a691b
Merge pull request #1096 from runcom/remove-VERSION-file
*: remove VERSION file
2017-10-30 13:13:59 -07:00
Antonio Murdaca 2f95aaf84c
Merge pull request #1098 from mrunalp/release_1.0.2
Release 1.0.2
2017-10-30 21:09:18 +01:00
Ed Santiago 1ac7da151b
restore lost cni-plugin option
Commit d5b5028c undid part of my pr#953 (cni plugin path). Restore it.

Signed-off-by: Ed Santiago <santiago@redhat.com>
2017-10-30 18:58:18 +01:00
Antonio Murdaca f8e583fca1
Makefile: output binaries under bin/
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-10-30 18:57:58 +01:00
Ed Santiago e78c44fe25
fixup! Restore conmon permissions in teardown()
Signed-off-by: Ed Santiago <santiago@redhat.com>
2017-10-30 18:57:57 +01:00
Ed Santiago 6dcf27f8cb
Issue #1024: don't chmod a nonexistent file
The new network test makes improper assumptions about the conmon path.
Use the predefined CONMON_BINARY variable instead.

Signed-off-by: Ed Santiago <santiago@redhat.com>
2017-10-30 18:57:57 +01:00
Mrunal Patel 532ed47d03 Bump up to 1.0.3-dev
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
2017-10-30 09:40:35 -07:00
Mrunal Patel 748bc46605 Release v1.0.2
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
2017-10-30 09:40:10 -07:00
Daniel J Walsh 29186a6b2c
Merge pull request #1089 from runcom/sort-mounts-v1
[release-1.0] container_create: sort mounts before adding them to the spec
2017-10-30 12:23:47 -04:00
Antonio Murdaca e4f8dff230
*: remove VERSION file
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-10-30 15:12:40 +01:00
Daniel J Walsh c31d1d15f7
Merge pull request #1093 from runcom/makefile-fixes-v1
[release-1.0] Makefile fixes for v1.0.x
2017-10-30 09:10:17 -04:00
Daniel J Walsh ae6e74731c
Merge pull request #1091 from runcom/cmux-http-read-timeout-v1
[release-1.0] cmd: crio: set ReadTimeout on the info endpoint
2017-10-30 09:07:06 -04:00
Daniel J Walsh fc2457a3f0
Strip out debuginfo and other content to make images smaller
This can be overridden by passing in:

make SHRINKFLAGS=

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2017-10-30 09:54:58 +01:00
Daniel J Walsh 266e1c10ec
Change buildtags based on installed environment.
Determine if selinux is available before building cri-o with support.
Don't add ostree support to crio or any tools other than kpod.
cri-o does not use ostree.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2017-10-30 09:54:49 +01:00
Antonio Murdaca 6a912eb2d3
cmd: crio: set ReadTimeout on the info endpoint
This avoids the goroutine leak we've been seeing during performance
tests. The goroutine count returns to normal after container cleanup.

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-10-29 21:59:31 +01:00
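As a rough illustration of the fix above (not CRI-O's actual handler wiring; the address and payload are made up), setting ReadTimeout on the http.Server bounds how long a client may take to send its request, so slow or stalled readers no longer pin serving goroutines:
```
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/info", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, `{"status":"ok"}`) // placeholder payload
	})

	srv := &http.Server{
		Addr:    "127.0.0.1:8099", // illustrative address only, not CRI-O's real info endpoint
		Handler: mux,
		// Without ReadTimeout, a client that never finishes sending its
		// request keeps the serving goroutine alive indefinitely.
		ReadTimeout: 5 * time.Second,
	}
	if err := srv.ListenAndServe(); err != nil {
		fmt.Println(err)
	}
}
```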
Antonio Murdaca 085bdf8ff5
container_create: sort mounts before adding them to the spec
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-10-29 12:31:42 +01:00
Antonio Murdaca 1544488a10
Merge pull request #1084 from lsm5/release-1.0-unitfile-fixes
systemd: expand limits for tests
2017-10-27 22:23:29 +02:00
Lokesh Mandvekar 73b6543864 systemd: expand limits for tests
Borrowed from:
https://github.com/projectatomic/atomic-system-containers/pull/136

From: Antonio Murdaca <runcom@redhat.com>
Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
2017-10-27 13:35:41 -04:00
Mrunal Patel ffe0d51a3c Merge pull request #1078 from mrunalp/close_ch_1.0
server: correctly return and close ch from exits routine
2017-10-26 19:06:39 -07:00
Antonio Murdaca 98ca0803d7 server: correctly return and close ch from exits routine
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-10-26 16:37:49 -07:00
Antonio Murdaca 387c1c6bff Merge pull request #1075 from mrunalp/release_1.0.1
Release 1.0.1
2017-10-25 19:42:45 +02:00
Mrunal Patel c1ccd5f295 version: Bump up to 1.0.2-dev
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
2017-10-25 07:44:40 -07:00
Mrunal Patel 64a30e1654 version: Release 1.0.1
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
2017-10-25 07:42:06 -07:00
Daniel J Walsh 155e83d75b Merge pull request #1051 from rhatdan/release-1.0
We need to release the SELinux label when we destroy the sandbox
2017-10-24 21:43:09 -07:00
Daniel J Walsh d8aaba71b7 Merge pull request #1045 from runcom/fix-host-pid-v1
[release-1.0] fix host pid handling for containers and share uts ns
2017-10-24 19:45:37 -07:00
Daniel J Walsh d2ea9cc2fa We need to release the SELinux label when we destroy the sandbox
This releases the MCS label so it can be used again. Only do this if we
don't have another sandbox using the same label.

Also vendor in the latest selinux go bindings, which fixes a leak and
properly reserves the SELinux label we are going to use.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2017-10-24 19:30:57 -07:00
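A minimal sketch of the reference counting described in this commit, standard library only; releaseLabel stands in for label.ReleaseLabel from the go-selinux bindings, and the MCS level strings are made up:
```
package main

import "fmt"

// levelRefs tracks how many sandboxes share each SELinux MCS level.
type levelRefs struct {
	counts map[string]int
}

func newLevelRefs() *levelRefs {
	return &levelRefs{counts: map[string]int{}}
}

// add records one more sandbox using the given MCS level.
func (l *levelRefs) add(level string) {
	l.counts[level]++
}

// remove drops one user of the level and reports whether the level
// can now be released (no remaining sandbox uses it).
func (l *levelRefs) remove(level string) bool {
	l.counts[level]--
	if l.counts[level] <= 0 {
		delete(l.counts, level)
		return true
	}
	return false
}

// releaseLabel is a placeholder for label.ReleaseLabel from
// github.com/opencontainers/selinux/go-selinux/label.
func releaseLabel(level string) {
	fmt.Println("released MCS level", level)
}

func main() {
	refs := newLevelRefs()
	refs.add("s0:c1,c2") // sandbox A
	refs.add("s0:c1,c2") // sandbox B shares the same level

	if refs.remove("s0:c1,c2") { // remove A: level still in use, not released
		releaseLabel("s0:c1,c2")
	}
	if refs.remove("s0:c1,c2") { // remove B: last user, release it
		releaseLabel("s0:c1,c2")
	}
}
```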
Antonio Murdaca 1461072521 Merge pull request #1059 from mrunalp/update_godbus_dep_1.0
vendor: Update godbus dependency to a389bd
2017-10-24 23:11:35 +02:00
Antonio Murdaca 0385463de0 Merge pull request #1055 from mrunalp/oom_test_loop_1.0
test: Test for OOM condition in a loop
2017-10-24 22:43:49 +02:00
Mrunal Patel 3a504024d5 Merge pull request #1040 from runcom/fix-process-exec-v1
[release-1.0] Fix process exec v1
2017-10-24 13:41:52 -07:00
Antonio Murdaca a45c16d7fa
fix host pid handling for containers and share uts ns
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-10-24 22:38:09 +02:00
Mrunal Patel a90213930b vendor: Update godbus dependency to a389bdde4dd695d414e47b755e95e72b7826432c
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
2017-10-24 12:27:03 -07:00
Mrunal Patel 17db40dcda test: Test for OOM condition in a loop
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
2017-10-24 11:25:46 -07:00
Antonio Murdaca a32c3d4b9a
oci: respect process spec on exec
This patch fixes exec to use the original (start-time) process exec
configuration. Otherwise we were creating a brand-new process spec without,
for instance, the additional groups.
Spotted while integrating CRI-O with cri-test... the test was failing
with:
```
• Failure [10.640 seconds]
[k8s.io] Security Context
/home/amurdaca/go/src/github.com/kubernetes-incubator/cri-tools/pkg/framework/framework.go:72
  bucket
  /home/amurdaca/go/src/github.com/kubernetes-incubator/cri-tools/pkg/validate/security_context.go:407
    runtime should support SupplementalGroups [It]
    /home/amurdaca/go/src/github.com/kubernetes-incubator/cri-tools/pkg/validate/security_context.go:272

    Expected
        <[]string | len:1, cap:1>: ["0"]
    to contain element matching
        <string>: 1234
```

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-10-23 11:18:08 +02:00
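The shape of the fix, sketched with the runtime-spec types (a simplified illustration, not the actual CRI-O exec path): copy the container's start-time process spec and only override the command, so fields such as the additional GIDs survive into the exec:
```
package main

import (
	"encoding/json"
	"fmt"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

// execProcess builds the process spec handed to the runtime for exec: it
// copies the container's original (start-time) process spec and only swaps
// the command, so supplemental groups, env, cwd, etc. are preserved.
func execProcess(original rspec.Process, command []string) rspec.Process {
	pspec := original // copy the start-time spec
	pspec.Args = command
	return pspec
}

func main() {
	// Hypothetical start-time spec with an additional group, mirroring the
	// SupplementalGroups test quoted above.
	original := rspec.Process{
		User: rspec.User{UID: 0, GID: 0, AdditionalGids: []uint32{1234}},
		Args: []string{"/pause"},
		Env:  []string{"PATH=/usr/bin:/bin"},
		Cwd:  "/",
	}

	pspec := execProcess(original, []string{"id", "-G"})
	out, err := json.Marshal(pspec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // AdditionalGids still contains 1234
}
```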
Mrunal Patel a2ab0a5eb0 Merge pull request #1047 from runcom/fix-e2e-v1
[release-1.0] Fix e2e v1
2017-10-20 10:24:45 -07:00
Antonio Murdaca ed89aa630e
contrib: test: fix e2e cmdline
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-10-20 08:32:04 +02:00
Daniel J Walsh 76508edc10 Merge pull request #1032 from rhatdan/seccomp
Update to latest seccomp filters in moby
2017-10-18 11:45:58 -04:00
Mrunal Patel 5f826acfaf Merge pull request #1033 from runcom/fix-stop-1.0
oci: fixes to properly handle container stop action
2017-10-18 08:05:00 -07:00
Antonio Murdaca 9b797f0cb9
oci: fixes to properly handle container stop action
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-10-18 11:57:47 +02:00
Mrunal Patel bb737b9121 Merge pull request #1022 from runcom/fix-version-rel-1
version: fix version handling and kube info
2017-10-17 14:37:01 -07:00
Mrunal Patel 70f6306a51 Merge pull request #1027 from umohnani8/secrets_1.0
Fix logic flaw in secrets mount
2017-10-17 13:56:42 -07:00
umohnani8 c0f6f4fb48 Fix logic flaw in secrets mount
Tested on a RHEL box and found that the mounts were not showing up.
There was a logic flaw: for a mount given as "host:container", the mount
source was being set to "host" and the destination to "ctrRunDir/container",
when instead the source should be "ctrRunDir/container" and the destination
"container", with the data copied from "host" to "ctrRunDir/container".

Signed-off-by: umohnani8 <umohnani@redhat.com>
2017-10-17 13:55:29 -04:00
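A sketch of the corrected mapping under the assumptions stated in the message (the paths and the ctrRunDir value are illustrative, and the actual data copy is omitted): for a "host:container" entry, the copy under ctrRunDir becomes the bind source and the container path the destination:
```
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// secretMount mirrors the fixed logic for a "host:container" entry.
type secretMount struct {
	CopyFrom    string // host path whose contents are copied into Source first
	Source      string // bind-mount source: the copy under the container run dir
	Destination string // bind-mount destination inside the container
}

func resolveSecretMount(entry, ctrRunDir string) (secretMount, error) {
	parts := strings.SplitN(entry, ":", 2)
	if len(parts) != 2 {
		return secretMount{}, fmt.Errorf("invalid mount %q, expected host:container", entry)
	}
	hostPath, ctrPath := parts[0], parts[1]
	return secretMount{
		CopyFrom:    hostPath,
		Source:      filepath.Join(ctrRunDir, ctrPath),
		Destination: ctrPath,
	}, nil
}

func main() {
	// Both arguments are illustrative values, not real CRI-O paths.
	m, err := resolveSecretMount("/usr/share/secrets:/run/secrets", "/var/run/containers/abc123")
	if err != nil {
		panic(err)
	}
	fmt.Printf("copy %s -> %s, then bind %s at %s\n", m.CopyFrom, m.Source, m.Source, m.Destination)
}
```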
Antonio Murdaca 7efdae80bc
version: fix version handling and kube info
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-10-17 10:48:31 +02:00
Mrunal Patel a636972c3e version: Release 1.0.0
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
2017-10-13 11:26:46 -07:00
Mrunal Patel 5b62041194 Merge pull request #1010 from runcom/oci-kill-all
oci: kill all processes in a container not just the main one
2017-10-13 08:54:58 -07:00
Mrunal Patel 38c2a34b46 Merge pull request #1009 from sameo/topic/ctr-create-2s-fix
oci: Remove useless crio-conmon- cgroup deletion
2017-10-13 08:53:29 -07:00
Antonio Murdaca ab2a4839d7
oci: kill all processes in a container not just the main one
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-10-13 14:37:25 +02:00
Daniel J Walsh c4f7506896 Merge pull request #1000 from nalind/bats-fixes
Fixes to use of bats in integration tests
2017-10-13 07:07:59 -04:00
Samuel Ortiz 29121c8c0c oci: Remove useless crio-conmon- cgroup deletion
It always fails because conmon is still there.
But more importantly, it adds a 2-second delay to container creation,
because we try to delete a cgroup that cannot be deleted yet.

With this patch, container creation typically takes less than
150ms instead of 2+ seconds.

Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
2017-10-13 11:58:23 +02:00
Nalin Dahyabhai ddb8fb30cc Correct our usage of the bats run helper
The bats "run" helper function sets "$status", so there's no point to
checking the value of "$status" when we haven't used the "run" helper to
run a command, and we almost always want to be checking the value after
we have used the helper.

There's no need to run commands like 'sleep' or 'rm -f' with the helper,
since they're not expected to fail, and if they do, it's probably
indicative of a larger problem that we want to allow to cause tests to
fail.

Helper functions like start_crio already check "$status" when they call
"run", so we don't need to check it again after they return.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2017-10-12 17:54:47 -04:00
Nalin Dahyabhai a88f6840d8 Look up the container's name for kpod-stop-by-name
In the kpod-stop-by-name test, use 'kpod inspect' to look up the name of
the container, rather than predicting the name that crio will assign.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2017-10-12 17:54:47 -04:00
Mrunal Patel 436194290a Merge pull request #1004 from umohnani8/secrets_patch
Follow up changes on secrets patch
2017-10-12 14:40:46 -07:00
umohnani8 d1aea31786 Follow up changes on secrets patch
Deleted mounts.conf file and moved the secrets mount paths
to a list (default-mounts) in crio.conf

Signed-off-by: umohnani8 <umohnani@redhat.com>
2017-10-12 15:10:07 -04:00
Daniel J Walsh 5b41729b6c Merge pull request #942 from umohnani8/secrets_patch
Add secrets support to crio
2017-10-12 11:04:20 -04:00
Daniel J Walsh a8224f8be1 Merge pull request #1002 from mrunalp/lint_fix
test: Modify Fatal to Fatalf as we have a specifier
2017-10-12 09:27:40 -04:00
umohnani8 d5b5028cb9 Add secrets patch to crio
Allows the user to define secret paths in /etc/containers/mounts.conf.
These are then volume-mounted into the container.

Signed-off-by: umohnani8 <umohnani@redhat.com>
2017-10-11 20:00:38 -04:00
Mrunal Patel bb4b2e9fea test: Modify Fatal to Fatalf as we have a specifier
Signed-off-by: Mrunal Patel <mpatel@redhat.com>
2017-10-11 14:51:11 -07:00
Daniel J Walsh d7cbdfce76 Merge pull request #886 from baude/kpod_json
Return Valid JSON for empty data
2017-10-11 16:26:23 -04:00
baude 3907e0d346 Return Valid JSON for empty data
For commands that ask for JSON results, if the input to the Go JSON
marshaller is empty, it will return a byte array with a literal
"null" in it.  If that is the case, we should output [] instead
as at least that is valid JSON and will not break consumers of the
data.

Signed-off-by: baude <bbaude@redhat.com>
2017-10-11 13:28:18 -05:00
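A minimal reproduction of the behaviour described above, standard library only: marshalling a nil slice yields the literal null, and the patch substitutes [] so consumers always receive valid JSON:
```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	var containers []string // empty result set: a nil slice

	data, err := json.Marshal(containers)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // prints: null

	// Same substitution the patch applies before printing the output.
	if bytes.Equal(data, []byte("null")) {
		data = []byte("[]")
	}
	fmt.Println(string(data)) // prints: []
}
```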
Daniel J Walsh 3363064622 Merge pull request #996 from mrunalp/fix_format
test: Fix format specifier
2017-10-11 13:03:16 -04:00
Mrunal Patel 7c2c9a8c85 test: Fix format specifier
Signed-off-by: Mrunal Patel <mpatel@redhat.com>
2017-10-10 16:23:54 -07:00
124 changed files with 6203 additions and 969 deletions

.github/CODEOWNERS (new file)

@ -0,0 +1,7 @@
# GitHub code owners
# See https://help.github.com/articles/about-codeowners/
#
# KEEP THIS FILE SORTED. Order is important. Last match takes precedence.
* @mrunalp @runcom
pkg/storage/** @nalind @runcom @rhatdan

.github/ISSUE_TEMPLATE.md (new file)

@ -0,0 +1,58 @@
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.
If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.
For more information about reporting issues, see
https://github.com/kubernetes-incubator/cri-o/blob/master/CONTRIBUTING.md#reporting-issues
---------------------------------------------------
GENERAL SUPPORT INFORMATION
---------------------------------------------------
The GitHub issue tracker is for bug reports and feature requests.
General support for **CRI-O** can be found at the following locations:
- IRC - #cri-o channel on irc.freenode.org
- Slack - kubernetes.slack.com #sig-node channel
- Post a question on StackOverflow, using the CRI-O tag
---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->
**Description**
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
**Steps to reproduce the issue:**
1.
2.
3.
**Describe the results you received:**
**Describe the results you expected:**
**Additional information you deem important (e.g. issue happens only occasionally):**
**Output of `crio --version`:**
```
(paste your output here)
```
**Additional environment details (AWS, VirtualBox, physical, etc.):**

.github/PULL_REQUEST_TEMPLATE.md (new file)

@ -0,0 +1,23 @@
<!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/kubernetes-incubator/cri-o/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **
If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"
Please provide the following information:
-->
**- What I did**
**- How I did it**
**- How to verify it**
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->

.gitignore

@ -1,17 +1,13 @@
/.artifacts/
/_output/
/conmon/conmon
/conmon/conmon.o
/docs/*.[158]
/docs/*.[158].gz
/kpod
/crioctl
/crio
/crio.conf
*.o
*.orig
/pause/pause
/pause/pause.o
/bin/
/test/bin2img/bin2img
/test/checkseccomp/checkseccomp
/test/copyimg/copyimg


@ -47,8 +47,6 @@ jobs:
go: 1.9.x
- script:
- make .gitvalidation
- make gofmt
- make lint
- make testunit
- make docs
- make


@ -97,7 +97,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install crictl
ENV CRICTL_COMMIT 16e6fe4d7199c5689db4630a9330e6a8a12cecd1
ENV CRICTL_COMMIT b42fc3f364dd48f649d55926c34492beeb9b2e99
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/kubernetes-incubator/cri-tools.git "$GOPATH/src/github.com/kubernetes-incubator/cri-tools" \


@ -11,7 +11,9 @@ LIBEXECDIR ?= ${PREFIX}/libexec
MANDIR ?= ${PREFIX}/share/man
ETCDIR ?= ${DESTDIR}/etc
ETCDIR_CRIO ?= ${ETCDIR}/crio
BUILDTAGS ?= selinux seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_tag.sh) $(shell hack/btrfs_installed_tag.sh)
BUILDTAGS ?= seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_tag.sh) $(shell hack/btrfs_installed_tag.sh) $(shell hack/ostree_tag.sh) $(shell hack/selinux_tag.sh)
CRICTL_CONFIG_DIR=${DESTDIR}/etc
BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
OCIUMOUNTINSTALLDIR=$(PREFIX)/share/oci-umount/oci-umount.d
@ -22,7 +24,6 @@ COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true)
GIT_COMMIT := $(if $(shell git status --porcelain --untracked-files=no),"${COMMIT_NO}-dirty","${COMMIT_NO}")
BUILD_INFO := $(shell date +%s)
VERSION := ${shell cat ./VERSION}
KPOD_VERSION := ${shell cat ./KPOD_VERSION}
# If GOPATH not specified, use one in the local directory
@ -35,8 +36,11 @@ GOPKGBASEDIR := $(shell dirname "$(GOPKGDIR)")
# Update VPATH so make finds .gopathok
VPATH := $(VPATH):$(GOPATH)
LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} -X main.buildInfo=${BUILD_INFO} -X main.version=${VERSION} -X main.kpodVersion=${KPOD_VERSION}'
SHRINKFLAGS := -s -w
BASE_LDFLAGS := ${SHRINKFLAGS} -X main.gitCommit=${GIT_COMMIT} -X main.buildInfo=${BUILD_INFO}
KPOD_LDFLAGS := -X main.kpodVersion=${KPOD_VERSION}
LDFLAGS := -ldflags '${BASE_LDFLAGS}'
LDFLAGS_KPOD := -ldflags '${BASE_LDFLAGS} ${KPOD_LDFLAGS}'
all: binaries crio.conf docs
@ -46,7 +50,7 @@ help:
@echo "Usage: make <target>"
@echo
@echo " * 'install' - Install binaries to system locations"
@echo " * 'binaries' - Build crio, conmon and crioctl"
@echo " * 'binaries' - Build crio, conmon, pause, crioctl and kpod"
@echo " * 'integration' - Execute integration tests"
@echo " * 'clean' - Clean artifacts"
@echo " * 'lint' - Execute the source code linter"
@ -73,25 +77,25 @@ pause:
$(MAKE) -C $@
test/bin2img/bin2img: .gopathok $(wildcard test/bin2img/*.go)
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/test/bin2img
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/bin2img
test/copyimg/copyimg: .gopathok $(wildcard test/copyimg/*.go)
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/test/copyimg
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/copyimg
test/checkseccomp/checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go)
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/test/checkseccomp
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/checkseccomp
crio: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/crio $(PROJECT))
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/cmd/crio
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o bin/$@ $(PROJECT)/cmd/crio
crioctl: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/crioctl $(PROJECT))
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/cmd/crioctl
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o bin/$@ $(PROJECT)/cmd/crioctl
kpod: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/kpod $(PROJECT))
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/cmd/kpod
$(GO) build $(LDFLAGS_KPOD) -tags "$(BUILDTAGS)" -o bin/$@ $(PROJECT)/cmd/kpod
crio.conf: crio
./crio --config="" config --default > crio.conf
./bin/crio --config="" config --default > crio.conf
clean:
ifneq ($(GOPATH),)
@ -102,7 +106,7 @@ endif
rm -fr test/testdata/redis-image
find . -name \*~ -delete
find . -name \#\* -delete
rm -f crioctl crio kpod
rm -f bin/crioctl bin/crio bin/kpod
make -C conmon clean
make -C pause clean
rm -f test/bin2img/bin2img
@ -121,10 +125,11 @@ integration: crioimage
testunit:
$(GO) test -tags "$(BUILDTAGS)" -cover $(PACKAGES)
localintegration: clean binaries
localintegration: clean binaries test-binaries
./test/test_runner.sh ${TESTFLAGS}
binaries: crio crioctl kpod conmon pause test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp
binaries: crio conmon pause kpod crioctl
test-binaries: test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp
MANPAGES_MD := $(wildcard docs/*.md)
MANPAGES := $(MANPAGES_MD:%.md=%)
@ -143,11 +148,11 @@ docs: $(MANPAGES)
install: .gopathok install.bin install.man
install.bin:
install ${SELINUXOPT} -D -m 755 crio $(BINDIR)/crio
install ${SELINUXOPT} -D -m 755 crioctl $(BINDIR)/crioctl
install ${SELINUXOPT} -D -m 755 kpod $(BINDIR)/kpod
install ${SELINUXOPT} -D -m 755 conmon/conmon $(LIBEXECDIR)/crio/conmon
install ${SELINUXOPT} -D -m 755 pause/pause $(LIBEXECDIR)/crio/pause
install ${SELINUXOPT} -D -m 755 bin/crio $(BINDIR)/crio
install ${SELINUXOPT} -D -m 755 bin/crioctl $(BINDIR)/crioctl
install ${SELINUXOPT} -D -m 755 bin/kpod $(BINDIR)/kpod
install ${SELINUXOPT} -D -m 755 bin/conmon $(LIBEXECDIR)/crio/conmon
install ${SELINUXOPT} -D -m 755 bin/pause $(LIBEXECDIR)/crio/pause
install.man:
install ${SELINUXOPT} -d -m 755 $(MANDIR)/man1
@ -161,6 +166,7 @@ install.config:
install ${SELINUXOPT} -D -m 644 crio.conf $(ETCDIR_CRIO)/crio.conf
install ${SELINUXOPT} -D -m 644 seccomp.json $(ETCDIR_CRIO)/seccomp.json
install ${SELINUXOPT} -D -m 644 crio-umount.conf $(OCIUMOUNTINSTALLDIR)/crio-umount.conf
install ${SELINUXOPT} -D -m 644 crictl.yaml $(CRICTL_CONFIG_DIR)
install.completions:
install ${SELINUXOPT} -d -m 755 ${BASHINSTALLDIR}


@ -4,7 +4,7 @@
[![Build Status](https://img.shields.io/travis/kubernetes-incubator/cri-o.svg?maxAge=2592000&style=flat-square)](https://travis-ci.org/kubernetes-incubator/cri-o)
[![Go Report Card](https://goreportcard.com/badge/github.com/kubernetes-incubator/cri-o?style=flat-square)](https://goreportcard.com/report/github.com/kubernetes-incubator/cri-o)
### Status: Release Candidate 3
### Status: Stable
## What is the scope of this project?
@ -256,5 +256,4 @@ To run a full cluster, see [the instructions](kubernetes.md).
1. Support for log management, networking integration using CNI, pluggable image/storage management (done)
1. Support for exec/attach (done)
1. Target fully automated kubernetes testing without failures [e2e status](https://github.com/kubernetes-incubator/cri-o/issues/533)
1. Release 1.0
1. Track upstream k8s releases


@ -1 +0,0 @@
1.0.0-rc4-dev


@ -108,6 +108,10 @@ cgroup_manager = "{{ .CgroupManager }}"
# hooks_dir_path is the oci hooks directory for automatically executed hooks
hooks_dir_path = "{{ .HooksDirPath }}"
# default_mounts is the mounts list to be mounted for the container when created
default_mounts = [
{{ range $mount := .DefaultMounts }}{{ printf "\t%q, \n" $mount }}{{ end }}]
# pids_limit is the number of processes allowed in a container
pids_limit = {{ .PidsLimit }}


@ -10,10 +10,12 @@ import (
"os/signal"
"sort"
"strings"
"time"
"github.com/containers/storage/pkg/reexec"
"github.com/kubernetes-incubator/cri-o/libkpod"
"github.com/kubernetes-incubator/cri-o/server"
"github.com/kubernetes-incubator/cri-o/version"
"github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
"github.com/soheilhy/cmux"
@ -23,10 +25,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// This is populated by the Makefile from the VERSION file
// in the repository
var version = ""
// gitCommit is the commit that the binary is being built from.
// It will be populated by the Makefile.
var gitCommit = ""
@ -127,6 +125,9 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error {
if ctx.GlobalIsSet("hooks-dir-path") {
config.HooksDirPath = ctx.GlobalString("hooks-dir-path")
}
if ctx.GlobalIsSet("default-mounts") {
config.DefaultMounts = ctx.GlobalStringSlice("default-mounts")
}
if ctx.GlobalIsSet("pids-limit") {
config.PidsLimit = ctx.GlobalInt64("pids-limit")
}
@ -179,9 +180,7 @@ func main() {
app := cli.NewApp()
var v []string
if version != "" {
v = append(v, version)
}
v = append(v, version.Version)
if gitCommit != "" {
v = append(v, fmt.Sprintf("commit: %s", gitCommit))
}
@ -322,6 +321,11 @@ func main() {
Value: libkpod.DefaultHooksDirPath,
Hidden: true,
},
cli.StringSliceFlag{
Name: "default-mounts",
Usage: "add one or more default mount paths in the form host:container",
Hidden: true,
},
cli.BoolFlag{
Name: "profile",
Usage: "enable pprof remote profiler on localhost:6060",
@ -405,6 +409,16 @@ func main() {
}()
}
args := c.Args()
if len(args) > 0 {
for _, command := range app.Commands {
if args[0] == command.Name {
break
}
}
return fmt.Errorf("command %q not supported", args[0])
}
config := c.App.Metadata["config"].(*server.Config)
if !config.SELinux {
@ -467,7 +481,8 @@ func main() {
infoMux := service.GetInfoMux()
srv := &http.Server{
Handler: infoMux,
Handler: infoMux,
ReadTimeout: 5 * time.Second,
}
graceful := false


@ -8,6 +8,7 @@ import (
"text/tabwriter"
"text/template"
"bytes"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
)
@ -59,6 +60,16 @@ func (j JSONStructArray) Out() error {
if err != nil {
return err
}
// JSON returns a byte array with a literal null [110 117 108 108] in it
// if it is passed empty data. We used bytes.Compare to see if that is
// the case.
if diff := bytes.Compare(data, []byte("null")); diff == 0 {
data = []byte("[]")
}
// If the we did get NULL back, we should spit out {} which is
// at least valid JSON for the consumer.
fmt.Printf("%s\n", data)
return nil
}


@ -398,7 +398,9 @@ func getJSONOutput(containers []*libkpod.ContainerData, nSpace bool) (psOutput [
func generatePsOutput(containers []*libkpod.ContainerData, server *libkpod.ContainerServer, opts psOptions) error {
containersOutput := getContainers(containers, opts)
if len(containersOutput) == 0 {
// In the case of JSON, we want to continue so we at least pass
// {} --valid JSON-- to the consumer
if len(containersOutput) == 0 && opts.format != formats.JSONString {
return nil
}


@ -6,6 +6,7 @@ import (
"github.com/kubernetes-incubator/cri-o/libkpod"
"github.com/pkg/errors"
"github.com/urfave/cli"
"golang.org/x/net/context"
)
var (
@ -53,7 +54,7 @@ func rmCmd(c *cli.Context) error {
force := c.Bool("force")
for _, container := range c.Args() {
id, err2 := server.Remove(container, force)
id, err2 := server.Remove(context.Background(), container, force)
if err2 != nil {
if err == nil {
err = err2


@ -7,6 +7,7 @@ import (
"github.com/kubernetes-incubator/cri-o/libkpod"
"github.com/pkg/errors"
"github.com/urfave/cli"
"golang.org/x/net/context"
)
var (
@ -61,7 +62,7 @@ func stopCmd(c *cli.Context) error {
}
var lastError error
for _, container := range c.Args() {
cid, err := server.ContainerStop(container, stopTimeout)
cid, err := server.ContainerStop(context.Background(), container, stopTimeout)
if err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)


@ -5,8 +5,8 @@ override LIBS += $(shell pkg-config --libs glib-2.0)
override CFLAGS += -std=c99 -Os -Wall -Wextra $(shell pkg-config --cflags glib-2.0)
conmon: $(obj)
$(CC) -o $@ $^ $(CFLAGS) $(LIBS)
$(CC) -o ../bin/$@ $^ $(CFLAGS) $(LIBS)
.PHONY: clean
clean:
rm -f $(obj) conmon
rm -f $(obj) ../bin/conmon


@ -12,7 +12,7 @@ ExecStart=/usr/local/bin/crio \
$CRIO_STORAGE_OPTIONS \
$CRIO_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
TasksMax=8192
TasksMax=infinity
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity


@ -4,7 +4,7 @@
git:
repo: "https://github.com/kubernetes-incubator/cri-tools.git"
dest: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-tools"
version: "16e6fe4d7199c5689db4630a9330e6a8a12cecd1"
version: "b42fc3f364dd48f649d55926c34492beeb9b2e99"
- name: install crictl
command: "/usr/bin/go install github.com/kubernetes-incubator/cri-tools/cmd/crictl"


@ -4,7 +4,7 @@
git:
repo: "https://github.com/runcom/kubernetes.git"
dest: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
version: "cri-o-node-e2e-patched"
version: "cri-o-node-e2e-patched-logs"
- name: install etcd
command: "hack/install-etcd.sh"


@ -42,7 +42,7 @@
e2e_shell_cmd: >
/usr/bin/go run hack/e2e.go
--test
-test_args="-host=https://{{ ansible_default_ipv4.address }}:6443
--test_args="-host=https://{{ ansible_default_ipv4.address }}:6443
--ginkgo.focus=\[Conformance\]
--report-dir={{ artifacts }}"
&> {{ artifacts }}/e2e.log


@ -108,4 +108,4 @@
- name: Update the kernel cmdline to include quota support
command: grubby --update-kernel=ALL --args="rootflags=pquota"
when: ansible_distribution in ['RedHat', 'CentOS']
when: ansible_distribution in ['RedHat', 'CentOS']

crictl.yaml (new file)

@ -0,0 +1 @@
runtime-endpoint: /var/run/crio.sock


@ -105,6 +105,9 @@ Example:
**no_pivot**=*true*|*false*
Instructs the runtime to not use pivot_root, but instead use MS_MOVE
**default_mounts**=[]
List of mount points, in the form host:container, to be mounted in every container
## CRIO.IMAGE TABLE
**default_transport**

hack/ostree_tag.sh (new executable file)

@ -0,0 +1,4 @@
#!/bin/bash
if ! pkg-config ostree-1 2> /dev/null ; then
echo containers_image_ostree_stub
fi

hack/selinux_tag.sh (new executable file)

@ -0,0 +1,4 @@
#!/bin/bash
if pkg-config libselinux 2> /dev/null ; then
echo selinux
fi


@ -145,6 +145,10 @@ type RuntimeConfig struct {
// HooksDirPath location of oci hooks config files
HooksDirPath string `toml:"hooks_dir_path"`
// DefaultMounts is the list of mounts to be mounted for each container
// The format of each mount is "host-path:container-path"
DefaultMounts []string `toml:"default_mounts"`
// Hooks List of hooks to run with container
Hooks map[string]HookParams


@ -19,6 +19,7 @@ import (
"github.com/kubernetes-incubator/cri-o/pkg/storage"
"github.com/opencontainers/runc/libcontainer"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -168,6 +169,7 @@ func New(config *Config) (*ContainerServer, error) {
containers: oci.NewMemoryStore(),
infraContainers: oci.NewMemoryStore(),
sandboxes: make(map[string]*sandbox.Sandbox),
processLevels: make(map[string]int),
},
config: config,
}, nil
@ -388,6 +390,7 @@ func (c *ContainerServer) LoadSandbox(id string) error {
if err != nil {
return err
}
scontainer.SetSpec(&m)
scontainer.SetMountPoint(m.Annotations[annotations.MountPoint])
if m.Annotations[annotations.Volumes] != "" {
@ -511,6 +514,7 @@ func (c *ContainerServer) LoadContainer(id string) error {
if err != nil {
return err
}
ctr.SetSpec(&m)
ctr.SetMountPoint(m.Annotations[annotations.MountPoint])
c.ContainerStateFromDisk(ctr)
@ -609,6 +613,8 @@ type containerServerState struct {
containers oci.ContainerStorer
infraContainers oci.ContainerStorer
sandboxes map[string]*sandbox.Sandbox
// processLevels The number of sandboxes using the same SELinux MCS level. Need to release MCS Level, when count reaches 0
processLevels map[string]int
}
// AddContainer adds a container to the container state store
@ -696,6 +702,7 @@ func (c *ContainerServer) AddSandbox(sb *sandbox.Sandbox) {
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.state.sandboxes[sb.ID()] = sb
c.state.processLevels[selinux.NewContext(sb.ProcessLabel())["level"]]++
}
// GetSandbox returns a sandbox by its ID
@ -728,7 +735,14 @@ func (c *ContainerServer) HasSandbox(id string) bool {
func (c *ContainerServer) RemoveSandbox(id string) {
c.stateLock.Lock()
defer c.stateLock.Unlock()
processLabel := c.state.sandboxes[id].ProcessLabel()
delete(c.state.sandboxes, id)
level := selinux.NewContext(processLabel)["level"]
c.state.processLevels[level]--
if c.state.processLevels[level] == 0 {
label.ReleaseLabel(processLabel)
delete(c.state.processLevels, level)
}
}
// ListSandboxes lists all sandboxes in the state store


@ -6,10 +6,11 @@ import (
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// Remove removes a container
func (c *ContainerServer) Remove(container string, force bool) (string, error) {
func (c *ContainerServer) Remove(ctx context.Context, container string, force bool) (string, error) {
ctr, err := c.LookupContainer(container)
if err != nil {
return "", err
@ -22,7 +23,7 @@ func (c *ContainerServer) Remove(container string, force bool) (string, error) {
return "", errors.Errorf("cannot remove paused container %s", ctrID)
case oci.ContainerStateCreated, oci.ContainerStateRunning:
if force {
_, err = c.ContainerStop(container, -1)
_, err = c.ContainerStop(ctx, container, 10)
if err != nil {
return "", errors.Wrapf(err, "unable to stop container %s", ctrID)
}


@ -3,10 +3,11 @@ package libkpod
import (
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// ContainerStop stops a running container with a grace period (i.e., timeout).
func (c *ContainerServer) ContainerStop(container string, timeout int64) (string, error) {
func (c *ContainerServer) ContainerStop(ctx context.Context, container string, timeout int64) (string, error) {
ctr, err := c.LookupContainer(container)
if err != nil {
return "", errors.Wrapf(err, "failed to find container %s", container)
@ -20,7 +21,7 @@ func (c *ContainerServer) ContainerStop(container string, timeout int64) (string
return "", errors.Errorf("cannot stop paused container %s", ctrID)
default:
if cStatus.Status != oci.ContainerStateStopped {
if err := c.runtime.StopContainer(ctr, timeout); err != nil {
if err := c.runtime.StopContainer(ctx, ctr, timeout); err != nil {
return "", errors.Wrapf(err, "failed to stop container %s", ctrID)
}
if err := c.storageRuntimeServer.StopContainer(ctrID); err != nil {


@ -48,6 +48,7 @@ type Container struct {
imageRef string
volumes []ContainerVolume
mountPoint string
spec *specs.Spec
}
// ContainerVolume is a bind mount for the container.
@ -99,6 +100,16 @@ func NewContainer(id string, name string, bundlePath string, logPath string, net
return c, nil
}
// SetSpec loads the OCI spec in the container struct
func (c *Container) SetSpec(s *specs.Spec) {
c.spec = s
}
// Spec returns a copy of the spec for the container
func (c *Container) Spec() specs.Spec {
return *c.spec
}
// GetStopSignal returns the container's own stop signal configured from the
// image configuration or the default one.
func (c *Container) GetStopSignal() string {


@ -17,6 +17,7 @@ import (
"github.com/kubernetes-incubator/cri-o/utils"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"golang.org/x/sys/unix"
kwait "k8s.io/apimachinery/pkg/util/wait"
)
@ -39,6 +40,10 @@ const (
SystemdCgroupsManager = "systemd"
// ContainerExitsDir is the location of container exit dirs
ContainerExitsDir = "/var/run/crio/exits"
// killContainerTimeout is the timeout that we wait for the container to
// be SIGKILLed.
killContainerTimeout = 2 * time.Minute
)
// New creates a new Runtime with options provided
@ -224,11 +229,12 @@ func (r *Runtime) CreateContainer(c *Container, cgroupParent string) (err error)
if err != nil {
logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
} else {
// XXX: this defer does nothing as the cgroup can't be deleted cause
// it contains the conmon pid in tasks
// we need to remove this defer and delete the cgroup once conmon exits
// maybe need a conmon monitor?
defer control.Delete()
// Here we should defer a crio-connmon- cgroup hierarchy deletion, but it will
// always fail as conmon's pid is still there.
// Fortunately, kubelet takes care of deleting this for us, so the leak will
// only happens in corner case where one does a manual deletion of the container
// through e.g. runc. This should be handled by implementing a conmon monitoring
// routine that does the cgroup cleanup once conmon is terminated.
if err := control.Add(cgroups.Process{Pid: cmd.Process.Pid}); err != nil {
logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
}
@ -406,7 +412,7 @@ func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp
os.RemoveAll(logPath)
}()
f, err := ioutil.TempFile("", "exec-process")
f, err := ioutil.TempFile("", "exec-sync-process")
if err != nil {
return nil, ExecSyncError{
ExitCode: -1,
@ -429,11 +435,8 @@ func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp
}
args = append(args, "-l", logPath)
pspec := rspec.Process{
Env: r.conmonEnv,
Args: command,
Cwd: "/",
}
pspec := c.Spec().Process
pspec.Args = command
processJSON, err := json.Marshal(pspec)
if err != nil {
return nil, ExecSyncError{
@ -541,25 +544,7 @@ func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp
}, nil
}
// StopContainer stops a container. Timeout is given in seconds.
func (r *Runtime) StopContainer(c *Container, timeout int64) error {
c.opLock.Lock()
defer c.opLock.Unlock()
// Check if the process is around before sending a signal
err := unix.Kill(c.state.Pid, 0)
if err == unix.ESRCH {
c.state.Finished = time.Now()
return nil
}
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", c.id, c.GetStopSignal()); err != nil {
return fmt.Errorf("failed to stop container %s, %v", c.id, err)
}
if timeout == -1 {
// default 10 seconds delay
timeout = 10
}
func waitContainerStop(ctx context.Context, c *Container, timeout time.Duration) error {
done := make(chan struct{})
// we could potentially re-use "done" channel to exit the loop on timeout
// but we use another channel "chControl" so that we won't never incur in the
@ -587,7 +572,10 @@ func (r *Runtime) StopContainer(c *Container, timeout int64) error {
select {
case <-done:
return nil
case <-time.After(time.Duration(timeout) * time.Second):
case <-ctx.Done():
close(chControl)
return ctx.Err()
case <-time.After(timeout):
close(chControl)
err := unix.Kill(c.state.Pid, unix.SIGKILL)
if err != nil && err != unix.ESRCH {
@ -596,10 +584,39 @@ func (r *Runtime) StopContainer(c *Container, timeout int64) error {
}
c.state.Finished = time.Now()
return nil
}
// StopContainer stops a container. Timeout is given in seconds.
func (r *Runtime) StopContainer(ctx context.Context, c *Container, timeout int64) error {
c.opLock.Lock()
defer c.opLock.Unlock()
// Check if the process is around before sending a signal
err := unix.Kill(c.state.Pid, 0)
if err == unix.ESRCH {
c.state.Finished = time.Now()
return nil
}
if timeout > 0 {
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", c.id, c.GetStopSignal()); err != nil {
return fmt.Errorf("failed to stop container %s, %v", c.id, err)
}
err = waitContainerStop(ctx, c, time.Duration(timeout)*time.Second)
if err == nil {
return nil
}
logrus.Warnf("Stop container %q timed out: %v", c.ID(), err)
}
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", "--all", c.id, "KILL"); err != nil {
return fmt.Errorf("failed to stop container %s, %v", c.id, err)
}
return waitContainerStop(ctx, c, killContainerTimeout)
}
// DeleteContainer deletes a container.
func (r *Runtime) DeleteContainer(c *Container) error {
c.opLock.Lock()


@ -5,9 +5,9 @@ override LIBS +=
override CFLAGS += -std=c99 -Os -Wall -Wextra -static
pause: $(obj)
$(CC) -o $@ $^ $(CFLAGS) $(LIBS)
strip $@
$(CC) -o ../bin/$@ $^ $(CFLAGS) $(LIBS)
strip ../bin/$@
.PHONY: clean
clean:
rm -f $(obj) pause
rm -f $(obj) ../bin/pause


@ -2,10 +2,8 @@ package storage
import (
"errors"
"fmt"
"net"
"path/filepath"
"regexp"
"strings"
"github.com/containers/image/copy"
@ -16,7 +14,12 @@ import (
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/containers/storage"
distreference "github.com/docker/distribution/reference"
digest "github.com/opencontainers/go-digest"
)
var (
// ErrCannotParseImageID is returned when we try to ResolveNames for an image ID
ErrCannotParseImageID = errors.New("cannot parse an image ID")
)
// ImageResult wraps a subset of information about an image: its ID, its names,
@ -25,6 +28,10 @@ type ImageResult struct {
ID string
Names []string
Size *uint64
// TODO(runcom): this is an hack for https://github.com/kubernetes-incubator/cri-o/pull/1136
// drop this when we have proper image IDs (as in, image IDs should be just
// the config blog digest which is stable across same images).
ConfigDigest digest.Digest
}
type indexInfo struct {
@ -47,6 +54,9 @@ type ImageServer interface {
ListImages(systemContext *types.SystemContext, filter string) ([]ImageResult, error)
// ImageStatus returns status of an image which matches the filter.
ImageStatus(systemContext *types.SystemContext, filter string) (*ImageResult, error)
// PrepareImage returns an Image where the config digest can be grabbed
// for further analysis. Call Close() on the resulting image.
PrepareImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.Image, error)
// PullImage imports an image from the specified location.
PullImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.ImageReference, error)
// RemoveImage deletes the specified image.
@ -146,14 +156,16 @@ func (svc *imageService) ImageStatus(systemContext *types.SystemContext, nameOrI
if err != nil {
return nil, err
}
defer img.Close()
size := imageSize(img)
img.Close()
return &ImageResult{
ID: image.ID,
Names: image.Names,
Size: size,
}, nil
res := &ImageResult{
ID: image.ID,
Names: image.Names,
Size: size,
ConfigDigest: img.ConfigInfo().Digest,
}
return res, nil
}
func imageSize(img types.Image) *uint64 {
@ -165,7 +177,7 @@ func imageSize(img types.Image) *uint64 {
}
func (svc *imageService) CanPull(imageName string, options *copy.Options) (bool, error) {
srcRef, err := svc.prepareImage(imageName, options)
srcRef, err := svc.prepareReference(imageName, options)
if err != nil {
return false, err
}
@ -182,9 +194,9 @@ func (svc *imageService) CanPull(imageName string, options *copy.Options) (bool,
return true, nil
}
// prepareImage creates an image reference from an image string and set options
// prepareReference creates an image reference from an image string and set options
// for the source context
func (svc *imageService) prepareImage(imageName string, options *copy.Options) (types.ImageReference, error) {
func (svc *imageService) prepareReference(imageName string, options *copy.Options) (types.ImageReference, error) {
if imageName == "" {
return nil, storage.ErrNotAnImage
}
@ -212,6 +224,18 @@ func (svc *imageService) prepareImage(imageName string, options *copy.Options) (
return srcRef, nil
}
func (svc *imageService) PrepareImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.Image, error) {
if options == nil {
options = &copy.Options{}
}
srcRef, err := svc.prepareReference(imageName, options)
if err != nil {
return nil, err
}
return srcRef.NewImage(systemContext)
}
func (svc *imageService) PullImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.ImageReference, error) {
policy, err := signature.DefaultPolicy(systemContext)
if err != nil {
@ -225,7 +249,7 @@ func (svc *imageService) PullImage(systemContext *types.SystemContext, imageName
options = &copy.Options{}
}
srcRef, err := svc.prepareImage(imageName, options)
srcRef, err := svc.prepareReference(imageName, options)
if err != nil {
return nil, err
}
@ -307,113 +331,27 @@ func (svc *imageService) isSecureIndex(indexName string) bool {
return true
}
func isValidHostname(hostname string) bool {
return hostname != "" && !strings.Contains(hostname, "/") &&
(strings.Contains(hostname, ".") ||
strings.Contains(hostname, ":") || hostname == "localhost")
}
func isReferenceFullyQualified(reposName reference.Named) bool {
indexName, _, _ := splitReposName(reposName)
return indexName != ""
}
const (
// defaultHostname is the default built-in hostname
defaultHostname = "docker.io"
// legacyDefaultHostname is automatically converted to DefaultHostname
legacyDefaultHostname = "index.docker.io"
// defaultRepoPrefix is the prefix used for default repositories in default host
defaultRepoPrefix = "library/"
)
// splitReposName breaks a reposName into an index name and remote name
func splitReposName(reposName reference.Named) (indexName string, remoteName reference.Named, err error) {
var remoteNameStr string
indexName, remoteNameStr = distreference.SplitHostname(reposName)
if !isValidHostname(indexName) {
// This is a Docker Index repos (ex: samalba/hipache or ubuntu)
// 'docker.io'
indexName = ""
remoteName = reposName
} else {
remoteName, err = withName(remoteNameStr)
}
return
}
func validateName(name string) error {
if err := validateID(strings.TrimPrefix(name, defaultHostname+"/")); err == nil {
return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
}
return nil
}
var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
// validateID checks whether an ID string is a valid image ID.
func validateID(id string) error {
if ok := validHex.MatchString(id); !ok {
return fmt.Errorf("image ID %q is invalid", id)
}
return nil
}
// withName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func withName(name string) (reference.Named, error) {
name, err := normalize(name)
if err != nil {
return nil, err
}
if err := validateName(name); err != nil {
return nil, err
}
r, err := distreference.WithName(name)
return r, err
}
// splitHostname splits a repository name to hostname and remotename string.
// If no valid hostname is found, empty string will be returned as a resulting
// hostname. Repository name needs to be already validated before.
func splitHostname(name string) (hostname, remoteName string) {
func splitDockerDomain(name string) (domain, remainder string) {
i := strings.IndexRune(name, '/')
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
hostname, remoteName = "", name
domain, remainder = "", name
} else {
hostname, remoteName = name[:i], name[i+1:]
}
if hostname == legacyDefaultHostname {
hostname = defaultHostname
}
if hostname == defaultHostname && !strings.ContainsRune(remoteName, '/') {
remoteName = defaultRepoPrefix + remoteName
domain, remainder = name[:i], name[i+1:]
}
return
}
// normalize returns a repository name in its normalized form, meaning it
// will contain library/ prefix for official images.
func normalize(name string) (string, error) {
host, remoteName := splitHostname(name)
if strings.ToLower(remoteName) != remoteName {
return "", errors.New("invalid reference format: repository name must be lowercase")
}
if host == defaultHostname {
if strings.HasPrefix(remoteName, defaultRepoPrefix) {
remoteName = strings.TrimPrefix(remoteName, defaultRepoPrefix)
}
return host + "/" + remoteName, nil
}
return name, nil
}
func (svc *imageService) ResolveNames(imageName string) ([]string, error) {
r, err := reference.ParseNormalizedNamed(imageName)
// This to prevent any image ID to go through this routine
_, err := reference.ParseNormalizedNamed(imageName)
if err != nil {
if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") {
return nil, ErrCannotParseImageID
}
return nil, err
}
if isReferenceFullyQualified(r) {
domain, remainder := splitDockerDomain(imageName)
if domain != "" {
// this means the image is already fully qualified
return []string{imageName}, nil
}
@ -425,10 +363,13 @@ func (svc *imageService) ResolveNames(imageName string) ([]string, error) {
// this means we got an image in the form of "busybox"
// we need to use additional registries...
// normalize the unqualified image to be domain/repo/image...
_, rest := splitDomain(r.Name())
images := []string{}
for _, r := range svc.registries {
images = append(images, filepath.Join(r, rest))
rem := remainder
if r == "docker.io" && !strings.ContainsRune(remainder, '/') {
rem = "library/" + rem
}
images = append(images, filepath.Join(r, rem))
}
return images, nil
}


@ -1,125 +0,0 @@
package storage
// This is a fork of docker/distribution code to be used when manipulating image
// references.
// DO NOT EDIT THIS FILE.
import "regexp"
var (
// alphaNumericRegexp defines the alpha numeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphaNumericRegexp = match(`[a-z0-9]+`)
// separatorRegexp defines the separators allowed to be embedded in name
// components. This allow one period, one or two underscore and multiple
// dashes.
separatorRegexp = match(`(?:[._]|__|[-]*)`)
// nameComponentRegexp restricts registry path component names to start
// with at least one letter or number, with following parts able to be
// separated by one period, one or two underscore and multiple dashes.
nameComponentRegexp = expression(
alphaNumericRegexp,
optional(repeated(separatorRegexp, alphaNumericRegexp)))
// domainComponentRegexp restricts the registry domain component of a
// repository name to start with a component as defined by domainRegexp
// and followed by an optional port.
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
// domainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
domainRegexp = expression(
domainComponentRegexp,
optional(repeated(literal(`.`), domainComponentRegexp)),
optional(literal(`:`), match(`[0-9]+`)))
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either.
NameRegexp = expression(
optional(domainRegexp, literal(`/`)),
nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp)))
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
anchoredNameRegexp = anchored(
optional(capture(domainRegexp), literal(`/`)),
capture(nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp))))
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
IdentifierRegexp = match(`([a-f0-9]{64})`)
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
)
// match compiles the string to a regular expression.
var match = regexp.MustCompile
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
re := match(regexp.QuoteMeta(s))
if _, complete := re.LiteralPrefix(); !complete {
panic("must be a literal")
}
return re
}
func splitDomain(name string) (string, string) {
match := anchoredNameRegexp.FindStringSubmatch(name)
if len(match) != 3 {
return "", name
}
return match[1], match[2]
}
// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
var s string
for _, re := range res {
s += re.String()
}
return match(s)
}
// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `?`)
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `+`)
}
// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(?:` + expression(res...).String() + `)`)
}
// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(` + expression(res...).String() + `)`)
}
// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
return match(`^` + expression(res...).String() + `$`)
}

pkg/storage/image_test.go

@ -0,0 +1,84 @@
package storage
import (
"reflect"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestResolveNames(t *testing.T) {
cases := []struct {
name string
additionalRegistries []string
imageName string
expected []string
err bool
errContains string
}{
{
name: "test unqualified images get correctly qualified in order and correct tag",
additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"},
imageName: "openshift3/ose-deployer:sometag",
expected: []string{"testregistry.com/openshift3/ose-deployer:sometag", "registry.access.redhat.com/openshift3/ose-deployer:sometag", "docker.io/openshift3/ose-deployer:sometag"},
err: false,
},
{
name: "test unqualified images get correctly qualified in order and correct digest",
additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"},
imageName: "openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3",
expected: []string{"testregistry.com/openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3", "registry.access.redhat.com/openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3", "docker.io/openshift3/ose-deployer@sha256:dc5f67a48da730d67bf4bfb8824ea8a51be26711de090d6d5a1ffff2723168a3"},
err: false,
},
{
name: "test unqualified images get correctly qualified in order",
additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"},
imageName: "openshift3/ose-deployer:latest",
expected: []string{"testregistry.com/openshift3/ose-deployer:latest", "registry.access.redhat.com/openshift3/ose-deployer:latest", "docker.io/openshift3/ose-deployer:latest"},
err: false,
},
{
name: "test unqualified images get correctly qualified from official library",
additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"},
imageName: "nginx:latest",
expected: []string{"testregistry.com/nginx:latest", "registry.access.redhat.com/nginx:latest", "docker.io/library/nginx:latest"},
err: false,
},
{
name: "test qualified images returns just qualified",
additionalRegistries: []string{"testregistry.com", "registry.access.redhat.com", "docker.io"},
imageName: "mypersonalregistry.com/nginx:latest",
expected: []string{"mypersonalregistry.com/nginx:latest"},
err: false,
},
{
name: "test we don't have names w/o registries",
imageName: "openshift3/ose-deployer:latest",
err: true,
},
{
name: "test we cannot resolve names from an image ID",
imageName: "6ad733544a6317992a6fac4eb19fe1df577d4dec7529efec28a5bd0edad0fd30",
err: true,
errContains: "cannot parse an image ID",
},
}
for _, c := range cases {
svc := &imageService{
registries: c.additionalRegistries,
}
names, err := svc.ResolveNames(c.imageName)
if !c.err {
require.NoError(t, err, c.name)
if !reflect.DeepEqual(names, c.expected) {
t.Fatalf("Expected: %v, Got: %v: %q", c.expected, names, c.name)
}
} else {
require.Error(t, err, c.name)
if c.errContains != "" {
assert.Contains(t, err.Error(), c.errContains)
}
}
}
}


@ -6,6 +6,7 @@ import (
"net"
"os"
"path/filepath"
"time"
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/kubernetes-incubator/cri-o/utils"
@ -25,10 +26,15 @@ const (
)
// Attach prepares a streaming endpoint to attach to a running container.
func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (*pb.AttachResponse, error) {
func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (resp *pb.AttachResponse, err error) {
const operation = "attach"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("AttachRequest %+v", req)
resp, err := s.GetAttach(req)
resp, err = s.GetAttach(req)
if err != nil {
return nil, fmt.Errorf("unable to prepare attach endpoint")
}


@ -8,6 +8,7 @@ import (
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
@ -46,29 +47,54 @@ const (
defaultSystemdParent = "system.slice"
)
func addOCIBindMounts(mountLabel string, containerConfig *pb.ContainerConfig, specgen *generate.Generator) ([]oci.ContainerVolume, error) {
type orderedMounts []rspec.Mount
// Len returns the number of mounts. Used in sorting.
func (m orderedMounts) Len() int {
return len(m)
}
// Less returns true if the number of parts (a/b/c would be 3 parts) in the
// mount indexed by parameter 1 is less than that of the mount indexed by
// parameter 2. Used in sorting.
func (m orderedMounts) Less(i, j int) bool {
return m.parts(i) < m.parts(j)
}
// Swap swaps two items in an array of mounts. Used in sorting
func (m orderedMounts) Swap(i, j int) {
m[i], m[j] = m[j], m[i]
}
// parts returns the number of parts in the destination of a mount. Used in sorting.
func (m orderedMounts) parts(i int) int {
return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
}
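A small self-contained sketch of the ordering this sort.Interface enforces, so parent directories are bind-mounted before anything nested under them. byDepth below is an illustrative stand-in operating on plain destination strings, not the orderedMounts type itself.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

// byDepth mirrors the orderedMounts logic above: destinations are sorted by
// the number of path components, so "/var" comes before "/var/lib".
type byDepth []string

func (m byDepth) Len() int      { return len(m) }
func (m byDepth) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
func (m byDepth) Less(i, j int) bool {
	return strings.Count(filepath.Clean(m[i]), string(os.PathSeparator)) <
		strings.Count(filepath.Clean(m[j]), string(os.PathSeparator))
}

func main() {
	dests := []string{"/var/lib/app/data", "/var", "/var/lib"}
	sort.Sort(byDepth(dests))
	fmt.Println(dests) // [/var /var/lib /var/lib/app/data]
}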
func addOCIBindMounts(mountLabel string, containerConfig *pb.ContainerConfig, specgen *generate.Generator) ([]oci.ContainerVolume, []rspec.Mount, error) {
volumes := []oci.ContainerVolume{}
ociMounts := []rspec.Mount{}
mounts := containerConfig.GetMounts()
for _, mount := range mounts {
dest := mount.ContainerPath
if dest == "" {
return nil, fmt.Errorf("Mount.ContainerPath is empty")
return nil, nil, fmt.Errorf("Mount.ContainerPath is empty")
}
src := mount.HostPath
if src == "" {
return nil, fmt.Errorf("Mount.HostPath is empty")
return nil, nil, fmt.Errorf("Mount.HostPath is empty")
}
if _, err := os.Stat(src); err != nil && os.IsNotExist(err) {
if err1 := os.MkdirAll(src, 0644); err1 != nil {
return nil, fmt.Errorf("Failed to mkdir %s: %s", src, err)
return nil, nil, fmt.Errorf("Failed to mkdir %s: %s", src, err)
}
}
src, err := resolveSymbolicLink(src)
if err != nil {
return nil, fmt.Errorf("failed to resolve symlink %q: %v", src, err)
return nil, nil, fmt.Errorf("failed to resolve symlink %q: %v", src, err)
}
options := []string{"rw"}
@ -80,7 +106,7 @@ func addOCIBindMounts(mountLabel string, containerConfig *pb.ContainerConfig, sp
if mount.SelinuxRelabel {
// Need a way in kubernetes to determine if the volume is shared or private
if err := label.Relabel(src, mountLabel, true); err != nil && err != unix.ENOTSUP {
return nil, fmt.Errorf("relabel failed %s: %v", src, err)
return nil, nil, fmt.Errorf("relabel failed %s: %v", src, err)
}
}
@ -90,45 +116,55 @@ func addOCIBindMounts(mountLabel string, containerConfig *pb.ContainerConfig, sp
Readonly: mount.Readonly,
})
specgen.AddBindMount(src, dest, options)
ociMounts = append(ociMounts, rspec.Mount{
Source: src,
Destination: dest,
Options: options,
})
}
return volumes, nil
return volumes, ociMounts, nil
}
func addImageVolumes(rootfs string, s *Server, containerInfo *storage.ContainerInfo, specgen *generate.Generator, mountLabel string) error {
func addImageVolumes(rootfs string, s *Server, containerInfo *storage.ContainerInfo, specgen *generate.Generator, mountLabel string) ([]rspec.Mount, error) {
mounts := []rspec.Mount{}
for dest := range containerInfo.Config.Config.Volumes {
fp, err := symlink.FollowSymlinkInScope(filepath.Join(rootfs, dest), rootfs)
if err != nil {
return err
return nil, err
}
switch s.config.ImageVolumes {
case libkpod.ImageVolumesMkdir:
if err1 := os.MkdirAll(fp, 0644); err1 != nil {
return err1
return nil, err1
}
case libkpod.ImageVolumesBind:
volumeDirName := stringid.GenerateNonCryptoID()
src := filepath.Join(containerInfo.RunDir, "mounts", volumeDirName)
if err1 := os.MkdirAll(src, 0644); err1 != nil {
return err1
return nil, err1
}
// Label the source with the sandbox selinux mount label
if mountLabel != "" {
if err1 := label.Relabel(src, mountLabel, true); err1 != nil && err1 != unix.ENOTSUP {
return fmt.Errorf("relabel failed %s: %v", src, err1)
return nil, fmt.Errorf("relabel failed %s: %v", src, err1)
}
}
logrus.Debugf("Adding bind mounted volume: %s to %s", src, dest)
specgen.AddBindMount(src, dest, []string{"rw"})
mounts = append(mounts, rspec.Mount{
Source: src,
Destination: dest,
Options: []string{"rw"},
})
case libkpod.ImageVolumesIgnore:
logrus.Debugf("Ignoring volume %v", dest)
default:
logrus.Fatalf("Unrecognized image volumes setting")
}
}
return nil
return mounts, nil
}
// resolveSymbolicLink resolves a possible symlink path. If the path is a symlink, returns resolved
@ -384,8 +420,23 @@ func ensureSaneLogPath(logPath string) error {
return nil
}
// addSecretsBindMounts mounts user defined secrets to the container
func addSecretsBindMounts(mountLabel, ctrRunDir string, defaultMounts []string, specgen generate.Generator) ([]rspec.Mount, error) {
containerMounts := specgen.Spec().Mounts
mounts, err := secretMounts(defaultMounts, mountLabel, ctrRunDir, containerMounts)
if err != nil {
return nil, err
}
return mounts, nil
}
// CreateContainer creates a new container in specified PodSandbox
func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) {
const operation = "create_container"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("CreateContainerRequest %+v", req)
s.updateLock.RLock()
@ -549,7 +600,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
}
}
containerVolumes, err := addOCIBindMounts(mountLabel, containerConfig, &specgen)
containerVolumes, ociMounts, err := addOCIBindMounts(mountLabel, containerConfig, &specgen)
if err != nil {
return nil, err
}
@ -569,6 +620,10 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
labels := containerConfig.GetLabels()
if err := validateLabels(labels); err != nil {
return nil, err
}
metadata := containerConfig.GetMetadata()
kubeAnnotations := containerConfig.GetAnnotations()
@ -733,6 +788,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware",
} {
specgen.AddLinuxMaskedPaths(mp)
@ -756,10 +812,20 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
logrus.Debugf("pod container state %+v", podInfraState)
ipcNsPath := fmt.Sprintf("/proc/%d/ns/ipc", podInfraState.Pid)
if err := specgen.AddOrReplaceLinuxNamespace("ipc", ipcNsPath); err != nil {
if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.IPCNamespace), ipcNsPath); err != nil {
return nil, err
}
utsNsPath := fmt.Sprintf("/proc/%d/ns/uts", podInfraState.Pid)
if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.UTSNamespace), utsNsPath); err != nil {
return nil, err
}
// Do not share pid ns for now
if containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostPid() {
specgen.RemoveLinuxNamespace(string(rspec.PIDNamespace))
}
netNsPath := sb.NetNsPath()
if netNsPath == "" {
// The sandbox does not have a permanent namespace,
@ -767,7 +833,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
netNsPath = fmt.Sprintf("/proc/%d/ns/net", podInfraState.Pid)
}
if err := specgen.AddOrReplaceLinuxNamespace("network", netNsPath); err != nil {
if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.NetworkNamespace), netNsPath); err != nil {
return nil, err
}
@ -782,8 +848,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
}
images, err := s.StorageImageServer().ResolveNames(image)
if err != nil {
// This means we got an image ID
if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") {
if err == storage.ErrCannotParseImageID {
images = append(images, image)
} else {
return nil, err
@ -856,7 +921,9 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
specgen.AddBindMount("/etc/hosts", "/etc/hosts", options)
}
// Set hostname and add env for hostname
specgen.SetHostname(sb.Hostname())
specgen.AddProcessEnv("HOSTNAME", sb.Hostname())
specgen.AddAnnotation(annotations.Name, containerName)
specgen.AddAnnotation(annotations.ContainerID, containerID)
@ -928,7 +995,8 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
}
// Add image volumes
if err := addImageVolumes(mountPoint, s, &containerInfo, &specgen, mountLabel); err != nil {
volumeMounts, err := addImageVolumes(mountPoint, s, &containerInfo, &specgen, mountLabel)
if err != nil {
return nil, err
}
@ -938,30 +1006,46 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
}
specgen.SetProcessArgs(processArgs)
// Add environment variables from CRI and image config
envs := containerConfig.GetEnvs()
if envs != nil {
for _, item := range envs {
key := item.Key
value := item.Value
if key == "" {
envs := []string{}
if containerConfig.GetEnvs() == nil && containerImageConfig != nil {
envs = containerImageConfig.Config.Env
} else {
for _, item := range containerConfig.GetEnvs() {
if item.GetKey() == "" {
continue
}
specgen.AddProcessEnv(key, value)
envs = append(envs, item.GetKey()+"="+item.GetValue())
}
if containerImageConfig != nil {
for _, imageEnv := range containerImageConfig.Config.Env {
var found bool
parts := strings.SplitN(imageEnv, "=", 2)
if len(parts) != 2 {
continue
}
imageEnvKey := parts[0]
if imageEnvKey == "" {
continue
}
for _, kubeEnv := range envs {
kubeEnvKey := strings.SplitN(kubeEnv, "=", 2)[0]
if kubeEnvKey == "" {
continue
}
if imageEnvKey == kubeEnvKey {
found = true
break
}
}
if !found {
envs = append(envs, imageEnv)
}
}
}
}
if containerImageConfig != nil {
for _, item := range containerImageConfig.Config.Env {
parts := strings.SplitN(item, "=", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("invalid env from image: %s", item)
}
if parts[0] == "" {
continue
}
specgen.AddProcessEnv(parts[0], parts[1])
}
for _, e := range envs {
parts := strings.SplitN(e, "=", 2)
specgen.AddProcessEnv(parts[0], parts[1])
}
// Set working directory
@ -978,6 +1062,32 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
containerCwd = runtimeCwd
}
specgen.SetProcessCwd(containerCwd)
if err := setupWorkingDirectory(mountPoint, mountLabel, containerCwd); err != nil {
if err1 := s.StorageRuntimeServer().StopContainer(containerID); err1 != nil {
return nil, fmt.Errorf("can't umount container after cwd error %v: %v", err, err1)
}
return nil, err
}
var secretMounts []rspec.Mount
if len(s.config.DefaultMounts) > 0 {
var err error
secretMounts, err = addSecretsBindMounts(mountLabel, containerInfo.RunDir, s.config.DefaultMounts, specgen)
if err != nil {
return nil, fmt.Errorf("failed to mount secrets: %v", err)
}
}
mounts := []rspec.Mount{}
mounts = append(mounts, ociMounts...)
mounts = append(mounts, volumeMounts...)
mounts = append(mounts, secretMounts...)
sort.Sort(orderedMounts(mounts))
for _, m := range mounts {
specgen.AddBindMount(m.Source, m.Destination, m.Options)
}
if err := s.setupOCIHooks(&specgen, sb, containerConfig, processArgs[0]); err != nil {
return nil, err
@ -1013,6 +1123,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
if err != nil {
return nil, err
}
container.SetSpec(specgen.Spec())
container.SetMountPoint(mountPoint)
for _, cv := range containerVolumes {
@ -1135,3 +1246,19 @@ func clearReadOnly(m *rspec.Mount) {
}
m.Options = opt
}
func setupWorkingDirectory(rootfs, mountLabel, containerCwd string) error {
fp, err := symlink.FollowSymlinkInScope(filepath.Join(rootfs, containerCwd), rootfs)
if err != nil {
return err
}
if err := os.MkdirAll(fp, 0755); err != nil {
return err
}
if mountLabel != "" {
if err1 := label.Relabel(fp, mountLabel, true); err1 != nil && err1 != unix.ENOTSUP {
return fmt.Errorf("relabel failed %s: %v", fp, err1)
}
}
return nil
}
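A compact sketch of the env precedence implemented above in createSandboxContainer: CRI (kube) envs win, image envs are only appended when their key is not already set, and the image envs are used as-is when the request carries none. mergeEnvs is a hypothetical helper, not a function from this changeset.

package main

import (
	"fmt"
	"strings"
)

func mergeEnvs(kubeEnvs, imageEnvs []string) []string {
	if len(kubeEnvs) == 0 {
		return imageEnvs
	}
	merged := append([]string{}, kubeEnvs...)
	for _, imageEnv := range imageEnvs {
		parts := strings.SplitN(imageEnv, "=", 2)
		if len(parts) != 2 || parts[0] == "" {
			continue
		}
		found := false
		for _, kubeEnv := range kubeEnvs {
			if strings.SplitN(kubeEnv, "=", 2)[0] == parts[0] {
				found = true
				break
			}
		}
		if !found {
			merged = append(merged, imageEnv)
		}
	}
	return merged
}

func main() {
	kube := []string{"PATH=/custom/bin", "TERM=xterm"}
	image := []string{"PATH=/usr/bin", "HOME=/root"}
	fmt.Println(mergeEnvs(kube, image)) // [PATH=/custom/bin TERM=xterm HOME=/root]
}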


@ -1,10 +1,13 @@
package server
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"time"
"github.com/docker/docker/pkg/pools"
"github.com/kubernetes-incubator/cri-o/oci"
@ -18,10 +21,16 @@ import (
)
// Exec prepares a streaming endpoint to execute a command in the container.
func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecResponse, error) {
func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (resp *pb.ExecResponse, err error) {
const operation = "exec"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("ExecRequest %+v", req)
resp, err := s.GetExec(req)
resp, err = s.GetExec(req)
if err != nil {
return nil, fmt.Errorf("unable to prepare exec endpoint")
}
@ -46,12 +55,29 @@ func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader,
return fmt.Errorf("container is not created or running")
}
f, err := ioutil.TempFile("", "exec-process")
if err != nil {
return err
}
defer os.RemoveAll(f.Name())
pspec := c.Spec().Process
pspec.Args = cmd
processJSON, err := json.Marshal(pspec)
if err != nil {
return err
}
if err := ioutil.WriteFile(f.Name(), processJSON, 0644); err != nil {
return err
}
args := []string{"exec"}
if tty {
args = append(args, "-t")
}
args = append(args, "-p", f.Name())
args = append(args, c.ID())
args = append(args, cmd...)
execCmd := exec.Command(ss.runtimeServer.Runtime().Path(c), args...)
var cmdErr error
if tty {

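A hedged sketch of the exec pattern introduced above: copy the container's process spec, swap in the requested command, write it to a temporary process.json, and hand that to `runc exec -p` so the exec'd process keeps the container's environment instead of conmon's. buildExecCmd, runtimePath and containerID are illustrative names, not part of the changeset.

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"

	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

func buildExecCmd(runtimePath, containerID string, pspec rspec.Process, cmd []string, tty bool) (*exec.Cmd, func(), error) {
	f, err := ioutil.TempFile("", "exec-process")
	if err != nil {
		return nil, nil, err
	}
	cleanup := func() { os.RemoveAll(f.Name()) }

	// Replace only the arguments; env, cwd, capabilities, etc. stay as in the
	// container's own process spec.
	pspec.Args = cmd
	processJSON, err := json.Marshal(pspec)
	if err != nil {
		cleanup()
		return nil, nil, err
	}
	if err := ioutil.WriteFile(f.Name(), processJSON, 0644); err != nil {
		cleanup()
		return nil, nil, err
	}

	args := []string{"exec"}
	if tty {
		args = append(args, "-t")
	}
	args = append(args, "-p", f.Name(), containerID)
	return exec.Command(runtimePath, args...), cleanup, nil
}

func main() {
	cmd, cleanup, err := buildExecCmd("/usr/bin/runc", "mycontainer",
		rspec.Process{Env: []string{"PATH=/usr/bin"}, Cwd: "/"}, []string{"env"}, false)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer cleanup()
	fmt.Println(cmd.Args) // something like [/usr/bin/runc exec -p /tmp/exec-process123 mycontainer]
}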

@ -2,6 +2,7 @@ package server
import (
"fmt"
"time"
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
@ -10,7 +11,12 @@ import (
)
// ExecSync runs a command in a container synchronously.
func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.ExecSyncResponse, error) {
func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (resp *pb.ExecSyncResponse, err error) {
const operation = "exec_sync"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("ExecSyncRequest %+v", req)
c, err := s.GetContainerFromRequest(req.ContainerId)
if err != nil {
@ -35,7 +41,7 @@ func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.Exe
if err != nil {
return nil, err
}
resp := &pb.ExecSyncResponse{
resp = &pb.ExecSyncResponse{
Stdout: execResp.Stdout,
Stderr: execResp.Stderr,
ExitCode: execResp.ExitCode,


@ -1,6 +1,8 @@
package server
import (
"time"
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
@ -27,41 +29,49 @@ func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool {
}
// ListContainers lists all containers by filters.
func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) {
func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (resp *pb.ListContainersResponse, err error) {
const operation = "list_containers"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("ListContainersRequest %+v", req)
var ctrs []*pb.Container
filter := req.Filter
filter := req.GetFilter()
ctrList, err := s.ContainerServer.ListContainers()
if err != nil {
return nil, err
}
// Filter using container id and pod id first.
if filter.Id != "" {
id, err := s.CtrIDIndex().Get(filter.Id)
if err != nil {
return nil, err
}
c := s.ContainerServer.GetContainer(id)
if c != nil {
if filter.PodSandboxId != "" {
if c.Sandbox() == filter.PodSandboxId {
ctrList = []*oci.Container{c}
} else {
ctrList = []*oci.Container{}
}
} else {
ctrList = []*oci.Container{c}
if filter != nil {
// Filter using container id and pod id first.
if filter.Id != "" {
id, err := s.CtrIDIndex().Get(filter.Id)
if err != nil {
return nil, err
}
}
} else {
if filter.PodSandboxId != "" {
pod := s.ContainerServer.GetSandbox(filter.PodSandboxId)
if pod == nil {
ctrList = []*oci.Container{}
} else {
ctrList = pod.Containers().List()
c := s.ContainerServer.GetContainer(id)
if c != nil {
if filter.PodSandboxId != "" {
if c.Sandbox() == filter.PodSandboxId {
ctrList = []*oci.Container{c}
} else {
ctrList = []*oci.Container{}
}
} else {
ctrList = []*oci.Container{c}
}
}
} else {
if filter.PodSandboxId != "" {
pod := s.ContainerServer.GetSandbox(filter.PodSandboxId)
if pod == nil {
ctrList = []*oci.Container{}
} else {
ctrList = pod.Containers().List()
}
}
}
}
@ -101,7 +111,7 @@ func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersReque
}
}
resp := &pb.ListContainersResponse{
resp = &pb.ListContainersResponse{
Containers: ctrs,
}
logrus.Debugf("ListContainersResponse: %+v", resp)


@ -6,6 +6,7 @@ import (
"io"
"os/exec"
"strings"
"time"
"github.com/docker/docker/pkg/pools"
"github.com/kubernetes-incubator/cri-o/oci"
@ -15,11 +16,15 @@ import (
)
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
func (s *Server) PortForward(ctx context.Context, req *pb.PortForwardRequest) (*pb.PortForwardResponse, error) {
func (s *Server) PortForward(ctx context.Context, req *pb.PortForwardRequest) (resp *pb.PortForwardResponse, err error) {
const operation = "port_forward"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("PortForwardRequest %+v", req)
resp, err := s.GetPortForward(req)
resp, err = s.GetPortForward(req)
if err != nil {
return nil, fmt.Errorf("unable to prepare portforward endpoint")
}


@ -1,6 +1,8 @@
package server
import (
"time"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
@ -8,13 +10,20 @@ import (
// RemoveContainer removes the container. If the container is running, the container
// should be force removed.
func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) {
_, err := s.ContainerServer.Remove(req.ContainerId, true)
func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (resp *pb.RemoveContainerResponse, err error) {
const operation = "remove_container"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("RemoveContainerRequest: %+v", req)
_, err = s.ContainerServer.Remove(ctx, req.ContainerId, true)
if err != nil {
return nil, err
}
resp := &pb.RemoveContainerResponse{}
resp = &pb.RemoveContainerResponse{}
logrus.Debugf("RemoveContainerResponse: %+v", resp)
return resp, nil
}


@ -2,6 +2,7 @@ package server
import (
"fmt"
"time"
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
@ -10,7 +11,12 @@ import (
)
// StartContainer starts the container.
func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) {
func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (resp *pb.StartContainerResponse, err error) {
const operation = "start_container"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("StartContainerRequest %+v", req)
c, err := s.GetContainerFromRequest(req.ContainerId)
if err != nil {
@ -37,7 +43,7 @@ func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerReque
return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err)
}
resp := &pb.StartContainerResponse{}
resp = &pb.StartContainerResponse{}
logrus.Debugf("StartContainerResponse %+v", resp)
return resp, nil
}


@ -2,6 +2,7 @@ package server
import (
"fmt"
"time"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
@ -9,6 +10,11 @@ import (
// ContainerStats returns stats of the container. If the container does not
// exist, the call returns an error.
func (s *Server) ContainerStats(ctx context.Context, req *pb.ContainerStatsRequest) (*pb.ContainerStatsResponse, error) {
func (s *Server) ContainerStats(ctx context.Context, req *pb.ContainerStatsRequest) (resp *pb.ContainerStatsResponse, err error) {
const operation = "container_stats"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
return nil, fmt.Errorf("not implemented")
}


@ -2,12 +2,18 @@ package server
import (
"fmt"
"time"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// ListContainerStats returns stats of all running containers.
func (s *Server) ListContainerStats(ctx context.Context, req *pb.ListContainerStatsRequest) (*pb.ListContainerStatsResponse, error) {
func (s *Server) ListContainerStats(ctx context.Context, req *pb.ListContainerStatsRequest) (resp *pb.ListContainerStatsResponse, err error) {
const operation = "list_container_stats"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
return nil, fmt.Errorf("not implemented")
}


@ -1,6 +1,8 @@
package server
import (
"time"
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
@ -14,7 +16,12 @@ const (
)
// ContainerStatus returns status of the container.
func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) {
func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (resp *pb.ContainerStatusResponse, err error) {
const operation = "container_status"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("ContainerStatusRequest %+v", req)
c, err := s.GetContainerFromRequest(req.ContainerId)
if err != nil {
@ -22,7 +29,7 @@ func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusReq
}
containerID := c.ID()
resp := &pb.ContainerStatusResponse{
resp = &pb.ContainerStatusResponse{
Status: &pb.ContainerStatus{
Id: containerID,
Metadata: c.Metadata(),


@ -1,19 +1,28 @@
package server
import (
"time"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// StopContainer stops a running container with a grace period (i.e., timeout).
func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) {
_, err := s.ContainerServer.ContainerStop(req.ContainerId, req.Timeout)
func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (resp *pb.StopContainerResponse, err error) {
const operation = "stop_container"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("StopContainerRequest %+v", req)
_, err = s.ContainerServer.ContainerStop(ctx, req.ContainerId, req.Timeout)
if err != nil {
return nil, err
}
resp := &pb.StopContainerResponse{}
resp = &pb.StopContainerResponse{}
logrus.Debugf("StopContainerResponse %s: %+v", req.ContainerId, resp)
return resp, nil
}


@ -1,11 +1,19 @@
package server
import (
"time"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// UpdateRuntimeConfig updates the runtime configuration.
func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeConfigRequest) (*pb.UpdateRuntimeConfigResponse, error) {
func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeConfigRequest) (resp *pb.UpdateRuntimeConfigResponse, err error) {
const operation = "update_runtime_config"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
return &pb.UpdateRuntimeConfigResponse{}, nil
}


@ -2,12 +2,19 @@ package server
import (
"fmt"
"time"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// ImageFsInfo returns information of the filesystem that is used to store images.
func (s *Server) ImageFsInfo(ctx context.Context, req *pb.ImageFsInfoRequest) (*pb.ImageFsInfoResponse, error) {
func (s *Server) ImageFsInfo(ctx context.Context, req *pb.ImageFsInfoRequest) (resp *pb.ImageFsInfoResponse, err error) {
const operation = "image_fs_info"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
return nil, fmt.Errorf("not implemented")
}


@ -1,13 +1,21 @@
package server
import (
"time"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// ListImages lists existing images.
func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (resp *pb.ListImagesResponse, err error) {
const operation = "list_images"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("ListImagesRequest: %+v", req)
filter := ""
reqFilter := req.GetFilter()
@ -21,21 +29,21 @@ func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb
if err != nil {
return nil, err
}
response := pb.ListImagesResponse{}
resp = &pb.ListImagesResponse{}
for _, result := range results {
if result.Size != nil {
response.Images = append(response.Images, &pb.Image{
resp.Images = append(resp.Images, &pb.Image{
Id: result.ID,
RepoTags: result.Names,
Size_: *result.Size,
})
} else {
response.Images = append(response.Images, &pb.Image{
resp.Images = append(resp.Images, &pb.Image{
Id: result.ID,
RepoTags: result.Names,
})
}
}
logrus.Debugf("ListImagesResponse: %+v", response)
return &response, nil
logrus.Debugf("ListImagesResponse: %+v", resp)
return resp, nil
}


@ -3,16 +3,24 @@ package server
import (
"encoding/base64"
"strings"
"time"
"github.com/containers/image/copy"
"github.com/containers/image/types"
"github.com/kubernetes-incubator/cri-o/pkg/storage"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// PullImage pulls a image with authentication config.
func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) {
func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (resp *pb.PullImageResponse, err error) {
const operation = "pull_image"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("PullImageRequest: %+v", req)
// TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://)
image := ""
@ -24,7 +32,6 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
var (
images []string
pulled string
err error
)
images, err = s.StorageImageServer().ResolveNames(image)
if err != nil {
@ -67,11 +74,23 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
}
// let's be smart, docker doesn't repull if image already exists.
_, err = s.StorageImageServer().ImageStatus(s.ImageContext(), img)
var storedImage *storage.ImageResult
storedImage, err = s.StorageImageServer().ImageStatus(s.ImageContext(), img)
if err == nil {
logrus.Debugf("image %s already in store, skipping pull", img)
pulled = img
break
tmpImg, err := s.StorageImageServer().PrepareImage(s.ImageContext(), img, options)
if err == nil {
tmpImgConfigDigest := tmpImg.ConfigInfo().Digest
if tmpImgConfigDigest.String() == "" {
// this means we are dealing with a schema1 image, in which
// case we re-pull the image unconditionally
logrus.Debugf("image config digest is empty, re-pulling image")
} else if tmpImgConfigDigest.String() == storedImage.ConfigDigest.String() {
logrus.Debugf("image %s already in store, skipping pull", img)
pulled = img
break
}
}
logrus.Debugf("image in store has different ID, re-pulling %s", img)
}
_, err = s.StorageImageServer().PullImage(s.ImageContext(), img, options)
@ -85,7 +104,7 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
if pulled == "" && err != nil {
return nil, err
}
resp := &pb.PullImageResponse{
resp = &pb.PullImageResponse{
ImageRef: pulled,
}
logrus.Debugf("PullImageResponse: %+v", resp)
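The skip-pull branch above boils down to a small decision: an image already in the store is reused only when its config digest matches the candidate's, and an empty candidate digest (schema1) always forces a re-pull. shouldRepull below is purely illustrative, not a function in the changeset.

package main

import "fmt"

func shouldRepull(storedConfigDigest, candidateConfigDigest string) bool {
	if candidateConfigDigest == "" {
		// schema1 image: no config digest to compare, re-pull to be safe.
		return true
	}
	return candidateConfigDigest != storedConfigDigest
}

func main() {
	fmt.Println(shouldRepull("sha256:abc", "sha256:abc")) // false: reuse stored image
	fmt.Println(shouldRepull("sha256:abc", "sha256:def")) // true: image ID changed upstream
	fmt.Println(shouldRepull("sha256:abc", ""))           // true: schema1, always re-pull
}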


@ -2,15 +2,22 @@ package server
import (
"fmt"
"strings"
"time"
"github.com/kubernetes-incubator/cri-o/pkg/storage"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// RemoveImage removes the image.
func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (resp *pb.RemoveImageResponse, err error) {
const operation = "remove_image"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("RemoveImageRequest: %+v", req)
image := ""
img := req.GetImage()
@ -22,13 +29,11 @@ func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*
}
var (
images []string
err error
deleted bool
)
images, err = s.StorageImageServer().ResolveNames(image)
if err != nil {
// This means we got an image ID
if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") {
if err == storage.ErrCannotParseImageID {
images = append(images, image)
} else {
return nil, err
@ -46,7 +51,7 @@ func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*
if !deleted && err != nil {
return nil, err
}
resp := &pb.RemoveImageResponse{}
resp = &pb.RemoveImageResponse{}
logrus.Debugf("RemoveImageResponse: %+v", resp)
return resp, nil
}


@ -2,9 +2,10 @@ package server
import (
"fmt"
"strings"
"time"
"github.com/containers/storage"
pkgstorage "github.com/kubernetes-incubator/cri-o/pkg/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
@ -12,7 +13,13 @@ import (
)
// ImageStatus returns the status of the image.
func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (resp *pb.ImageStatusResponse, err error) {
const operation = "image_status"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("ImageStatusRequest: %+v", req)
image := ""
img := req.GetImage()
@ -24,8 +31,7 @@ func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*
}
images, err := s.StorageImageServer().ResolveNames(image)
if err != nil {
// This means we got an image ID
if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") {
if err == pkgstorage.ErrCannotParseImageID {
images = append(images, image)
} else {
return nil, err
@ -40,7 +46,7 @@ func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*
}
return nil, err
}
resp := &pb.ImageStatusResponse{
resp = &pb.ImageStatusResponse{
Image: &pb.Image{
Id: status.ID,
RepoTags: status.Names,


@ -48,7 +48,7 @@ func (s *Server) getContainerInfo(id string, getContainerFunc func(id string) *o
return types.ContainerInfo{
Name: ctr.Name(),
Pid: ctrState.Pid,
Image: ctr.Image(),
Image: ctr.ImageName(),
CreatedTime: ctrState.Created.UnixNano(),
Labels: ctr.Labels(),
Annotations: ctr.Annotations(),


@ -96,7 +96,7 @@ func TestGetContainerInfo(t *testing.T) {
t.Fatalf("expected same created time %d, got %d", created.UnixNano(), ci.CreatedTime)
}
if ci.Pid != 42 {
t.Fatalf("expected pid 42, got %s", ci.Pid)
t.Fatalf("expected pid 42, got %v", ci.Pid)
}
if ci.Name != "testname" {
t.Fatalf("expected name testname, got %s", ci.Name)
@ -114,7 +114,7 @@ func TestGetContainerInfo(t *testing.T) {
t.Fatalf("expected sandbox to be testsandboxid, got %s", ci.Sandbox)
}
if ci.IP != "1.1.1.42" {
t.Fatal("expected ip 1.1.1.42, got %s", ci.IP)
t.Fatalf("expected ip 1.1.1.42, got %s", ci.IP)
}
if len(ci.Annotations) == 0 {
t.Fatal("annotations are empty")

server/metrics/metrics.go

@ -0,0 +1,70 @@
package metrics
import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
)
const (
// CRIOOperationsKey is the key for CRI-O operation metrics.
CRIOOperationsKey = "crio_operations"
// CRIOOperationsLatencyKey is the key for the operation latency metrics.
CRIOOperationsLatencyKey = "crio_operations_latency_microseconds"
// CRIOOperationsErrorsKey is the key for the operation error metrics.
CRIOOperationsErrorsKey = "crio_operations_errors"
// TODO(runcom):
// timeouts
subsystem = "container_runtime"
)
var (
// CRIOOperations collects operation counts by operation type.
CRIOOperations = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: CRIOOperationsKey,
Help: "Cumulative number of CRI-O operations by operation type.",
},
[]string{"operation_type"},
)
// CRIOOperationsLatency collects operation latency numbers by operation
// type.
CRIOOperationsLatency = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Subsystem: subsystem,
Name: CRIOOperationsLatencyKey,
Help: "Latency in microseconds of CRI-O operations. Broken down by operation type.",
},
[]string{"operation_type"},
)
// CRIOOperationsErrors collects operation errors by operation
// type.
CRIOOperationsErrors = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: CRIOOperationsErrorsKey,
Help: "Cumulative number of CRI-O operation errors by operation type.",
},
[]string{"operation_type"},
)
)
var registerMetrics sync.Once
// Register all metrics
func Register() {
registerMetrics.Do(func() {
prometheus.MustRegister(CRIOOperations)
prometheus.MustRegister(CRIOOperationsLatency)
prometheus.MustRegister(CRIOOperationsErrors)
})
}
// SinceInMicroseconds gets the time since the specified start in microseconds.
func SinceInMicroseconds(start time.Time) float64 {
return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}
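A minimal sketch of how this package is consumed: register the collectors once, bump the counter and latency summary around an operation, and expose everything over /metrics, mirroring CreateMetricsEndpoint further down in this diff. The listen port and the "pull_image" label are assumptions.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/kubernetes-incubator/cri-o/server/metrics"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	metrics.Register()

	start := time.Now()
	// ... some CRI operation would run here ...
	metrics.CRIOOperations.WithLabelValues("pull_image").Inc()
	metrics.CRIOOperationsLatency.WithLabelValues("pull_image").Observe(metrics.SinceInMicroseconds(start))

	http.Handle("/metrics", prometheus.Handler())
	log.Fatal(http.ListenAndServe(":9090", nil)) // illustrative port
}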


@ -1,12 +1,19 @@
package server
import (
"time"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// Status returns the status of the runtime
func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) {
func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (resp *pb.StatusResponse, err error) {
const operation = "status"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
// Deal with Runtime conditions
runtimeReady, err := s.Runtime().RuntimeReady()
@ -22,7 +29,7 @@ func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusR
runtimeReadyConditionString := pb.RuntimeReady
networkReadyConditionString := pb.NetworkReady
resp := &pb.StatusResponse{
resp = &pb.StatusResponse{
Status: &pb.RuntimeStatus{
Conditions: []*pb.RuntimeCondition{
{


@ -1,6 +1,8 @@
package server
import (
"time"
"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
@ -28,7 +30,13 @@ func filterSandbox(p *pb.PodSandbox, filter *pb.PodSandboxFilter) bool {
}
// ListPodSandbox returns a list of SandBoxes.
func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (resp *pb.ListPodSandboxResponse, err error) {
const operation = "list_pod_sandbox"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("ListPodSandboxRequest %+v", req)
var pods []*pb.PodSandbox
var podList []*sandbox.Sandbox
@ -82,7 +90,7 @@ func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxReque
}
}
resp := &pb.ListPodSandboxResponse{
resp = &pb.ListPodSandboxResponse{
Items: pods,
}
logrus.Debugf("ListPodSandboxResponse %+v", resp)


@ -2,6 +2,7 @@ package server
import (
"fmt"
"time"
"github.com/containers/storage"
"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
@ -15,7 +16,13 @@ import (
// RemovePodSandbox deletes the sandbox. If there are any running containers in the
// sandbox, they should be force deleted.
func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (resp *pb.RemovePodSandboxResponse, err error) {
const operation = "remove_pod_sandbox"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("RemovePodSandboxRequest %+v", req)
sb, err := s.getPodSandboxFromRequest(req.PodSandboxId)
if err != nil {
@ -27,7 +34,7 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
// the CRI interface which expects to not error out in not found
// cases.
resp := &pb.RemovePodSandboxResponse{}
resp = &pb.RemovePodSandboxResponse{}
logrus.Warnf("could not get sandbox %s, it's probably been removed already: %v", req.PodSandboxId, err)
return resp, nil
}
@ -41,7 +48,7 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
if !sb.Stopped() {
cState := s.Runtime().ContainerStatus(c)
if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
if err := s.Runtime().StopContainer(c, -1); err != nil {
if err := s.Runtime().StopContainer(ctx, c, 10); err != nil {
// Assume container is already stopped
logrus.Warnf("failed to stop container %s: %v", c.Name(), err)
}
@ -92,7 +99,7 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
return nil, fmt.Errorf("failed to delete pod sandbox %s from index: %v", sb.ID(), err)
}
resp := &pb.RemovePodSandboxResponse{}
resp = &pb.RemovePodSandboxResponse{}
logrus.Debugf("RemovePodSandboxResponse %+v", resp)
return resp, nil
}


@ -16,6 +16,7 @@ import (
"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/kubernetes-incubator/cri-o/pkg/annotations"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
@ -94,6 +95,12 @@ var (
// RunPodSandbox creates and runs a pod-level sandbox.
func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest) (resp *pb.RunPodSandboxResponse, err error) {
const operation = "run_pod_sandbox"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
s.updateLock.RLock()
defer s.updateLock.RUnlock()
@ -220,6 +227,10 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
// add labels
labels := req.GetConfig().GetLabels()
if err := validateLabels(labels); err != nil {
return nil, err
}
// Add special container name label for the infra container
labelsJSON := []byte{}
if labels != nil {
@ -424,7 +435,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
// set up namespaces
if hostNetwork {
err = g.RemoveLinuxNamespace("network")
err = g.RemoveLinuxNamespace(string(runtimespec.NetworkNamespace))
if err != nil {
return nil, err
}
@ -445,21 +456,21 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
}()
// Pass the created namespace path to the runtime
err = g.AddOrReplaceLinuxNamespace("network", sb.NetNsPath())
err = g.AddOrReplaceLinuxNamespace(string(runtimespec.NetworkNamespace), sb.NetNsPath())
if err != nil {
return nil, err
}
}
if namespaceOptions.HostPid {
err = g.RemoveLinuxNamespace("pid")
if securityContext.GetNamespaceOptions().GetHostPid() {
err = g.RemoveLinuxNamespace(string(runtimespec.PIDNamespace))
if err != nil {
return nil, err
}
}
if namespaceOptions.HostIpc {
err = g.RemoveLinuxNamespace("ipc")
if securityContext.GetNamespaceOptions().GetHostIpc() {
err = g.RemoveLinuxNamespace(string(runtimespec.IPCNamespace))
if err != nil {
return nil, err
}
@ -492,6 +503,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
if err != nil {
return nil, err
}
container.SetSpec(g.Spec())
container.SetMountPoint(mountPoint)
sb.SetInfraContainer(container)


@ -1,6 +1,8 @@
package server
import (
"time"
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
@ -8,7 +10,13 @@ import (
)
// PodSandboxStatus returns the Status of the PodSandbox.
func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) {
func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (resp *pb.PodSandboxStatusResponse, err error) {
const operation = "pod_sandbox_status"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("PodSandboxStatusRequest %+v", req)
sb, err := s.getPodSandboxFromRequest(req.PodSandboxId)
if err != nil {
@ -24,7 +32,7 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR
}
sandboxID := sb.ID()
resp := &pb.PodSandboxStatusResponse{
resp = &pb.PodSandboxStatusResponse{
Status: &pb.PodSandboxStatus{
Id: sandboxID,
CreatedAt: podInfraContainer.CreatedAt().UnixNano(),


@ -2,6 +2,7 @@ package server
import (
"fmt"
"time"
"github.com/containers/storage"
"github.com/docker/docker/pkg/mount"
@ -18,7 +19,13 @@ import (
// StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be force terminated.
func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) {
func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (resp *pb.StopPodSandboxResponse, err error) {
const operation = "stop_pod_sandbox"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
logrus.Debugf("StopPodSandboxRequest %+v", req)
sb, err := s.getPodSandboxFromRequest(req.PodSandboxId)
if err != nil {
@ -30,14 +37,14 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque
// the CRI interface which expects to not error out in not found
// cases.
resp := &pb.StopPodSandboxResponse{}
resp = &pb.StopPodSandboxResponse{}
logrus.Warnf("could not get sandbox %s, it's probably been stopped already: %v", req.PodSandboxId, err)
logrus.Debugf("StopPodSandboxResponse %s: %+v", req.PodSandboxId, resp)
return resp, nil
}
if sb.Stopped() {
resp := &pb.StopPodSandboxResponse{}
resp = &pb.StopPodSandboxResponse{}
logrus.Debugf("StopPodSandboxResponse %s: %+v", sb.ID(), resp)
return resp, nil
}
@ -56,7 +63,7 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque
for _, c := range containers {
cStatus := s.Runtime().ContainerStatus(c)
if cStatus.Status != oci.ContainerStateStopped {
if err := s.Runtime().StopContainer(c, -1); err != nil {
if err := s.Runtime().StopContainer(ctx, c, 10); err != nil {
return nil, fmt.Errorf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.ID(), err)
}
if c.ID() == podInfraContainer.ID() {
@ -95,7 +102,7 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque
}
sb.SetStopped()
resp := &pb.StopPodSandboxResponse{}
resp = &pb.StopPodSandboxResponse{}
logrus.Debugf("StopPodSandboxResponse %s: %+v", sb.ID(), resp)
return resp, nil
}

server/secrets.go

@ -0,0 +1,162 @@
package server
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// SecretData info
type SecretData struct {
Name string
Data []byte
}
// SaveTo saves secret data to given directory
func (s SecretData) SaveTo(dir string) error {
path := filepath.Join(dir, s.Name)
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil && !os.IsExist(err) {
return err
}
return ioutil.WriteFile(path, s.Data, 0700)
}
func readAll(root, prefix string) ([]SecretData, error) {
path := filepath.Join(root, prefix)
data := []SecretData{}
files, err := ioutil.ReadDir(path)
if err != nil {
if os.IsNotExist(err) {
return data, nil
}
return nil, err
}
for _, f := range files {
fileData, err := readFile(root, filepath.Join(prefix, f.Name()))
if err != nil {
// If the file did not exist, might be a dangling symlink
// Ignore the error
if os.IsNotExist(err) {
continue
}
return nil, err
}
data = append(data, fileData...)
}
return data, nil
}
func readFile(root, name string) ([]SecretData, error) {
path := filepath.Join(root, name)
s, err := os.Stat(path)
if err != nil {
return nil, err
}
if s.IsDir() {
dirData, err := readAll(root, name)
if err != nil {
return nil, err
}
return dirData, nil
}
bytes, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
return []SecretData{{Name: name, Data: bytes}}, nil
}
// getMountsMap separates the host:container paths
func getMountsMap(path string) (string, string, error) {
arr := strings.SplitN(path, ":", 2)
if len(arr) == 2 {
return arr[0], arr[1], nil
}
return "", "", errors.Errorf("unable to get host and container dir")
}
func getHostSecretData(hostDir string) ([]SecretData, error) {
var allSecrets []SecretData
hostSecrets, err := readAll(hostDir, "")
if err != nil {
return nil, errors.Wrapf(err, "failed to read secrets from %q", hostDir)
}
return append(allSecrets, hostSecrets...), nil
}
// secretMounts copies the contents of the host directory to the container directory
// and returns a list of mounts
func secretMounts(defaultMountsPaths []string, mountLabel, containerWorkingDir string, runtimeMounts []rspec.Mount) ([]rspec.Mount, error) {
var mounts []rspec.Mount
for _, path := range defaultMountsPaths {
hostDir, ctrDir, err := getMountsMap(path)
if err != nil {
return nil, err
}
// skip if the hostDir path doesn't exist
if _, err := os.Stat(hostDir); os.IsNotExist(err) {
logrus.Warnf("%q doesn't exist, skipping", hostDir)
continue
}
ctrDirOnHost := filepath.Join(containerWorkingDir, ctrDir)
// skip if ctrDir has already been mounted by caller
if isAlreadyMounted(runtimeMounts, ctrDir) {
logrus.Warnf("%q has already been mounted; cannot override mount", ctrDir)
continue
}
if err := os.RemoveAll(ctrDirOnHost); err != nil {
return nil, fmt.Errorf("remove container directory failed: %v", err)
}
if err := os.MkdirAll(ctrDirOnHost, 0755); err != nil {
return nil, fmt.Errorf("making container directory failed: %v", err)
}
hostDir, err = resolveSymbolicLink(hostDir)
if err != nil {
return nil, err
}
data, err := getHostSecretData(hostDir)
if err != nil {
return nil, errors.Wrapf(err, "getting host secret data failed")
}
for _, s := range data {
s.SaveTo(ctrDirOnHost)
}
label.Relabel(ctrDirOnHost, mountLabel, false)
m := rspec.Mount{
Source: ctrDirOnHost,
Destination: ctrDir,
}
mounts = append(mounts, m)
}
return mounts, nil
}
func isAlreadyMounted(mounts []rspec.Mount, mountPath string) bool {
for _, mount := range mounts {
if mount.Destination == mountPath {
return true
}
}
return false
}
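A self-contained sketch of how a default-mounts entry is split into its host and container sides. parseDefaultMount is an illustrative stand-in for getMountsMap above, and the example path is an assumption.

package main

import (
	"fmt"
	"strings"
)

// parseDefaultMount mirrors getMountsMap above: a default-mounts entry is a
// "hostDir:containerDir" pair, split on the first colon.
func parseDefaultMount(path string) (string, string, error) {
	arr := strings.SplitN(path, ":", 2)
	if len(arr) == 2 {
		return arr[0], arr[1], nil
	}
	return "", "", fmt.Errorf("unable to get host and container dir from %q", path)
}

func main() {
	host, ctr, err := parseDefaultMount("/usr/share/rhel/secrets:/run/secrets")
	fmt.Println(host, ctr, err) // /usr/share/rhel/secrets /run/secrets <nil>
}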


@ -20,6 +20,7 @@ import (
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/kubernetes-incubator/cri-o/pkg/storage"
"github.com/kubernetes-incubator/cri-o/server/apparmor"
"github.com/kubernetes-incubator/cri-o/server/metrics"
"github.com/kubernetes-incubator/cri-o/server/seccomp"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@ -35,8 +36,7 @@ import (
)
const (
runtimeAPIVersion = "v1alpha1"
shutdownFile = "/var/lib/crio/crio.shutdown"
shutdownFile = "/var/lib/crio/crio.shutdown"
)
func isTrue(annotaton string) bool {
@ -350,6 +350,7 @@ func (s *Server) getPodSandboxFromRequest(podSandboxID string) (*sandbox.Sandbox
// CreateMetricsEndpoint creates a /metrics endpoint
// for prometheus monitoring
func (s *Server) CreateMetricsEndpoint() (*http.ServeMux, error) {
metrics.Register()
mux := &http.ServeMux{}
mux.Handle("/metrics", prometheus.Handler())
return mux, nil
@ -419,6 +420,7 @@ func (s *Server) StartExitMonitor() {
}()
if err := watcher.Add(s.config.ContainerExitsDir); err != nil {
logrus.Errorf("watcher.Add(%q) failed: %s", s.config.ContainerExitsDir, err)
close(done)
}
<-done
}


@ -5,9 +5,11 @@ import (
"io"
"os"
"strings"
"time"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
"github.com/kubernetes-incubator/cri-o/server/metrics"
"github.com/opencontainers/runtime-tools/validate"
"github.com/syndtr/gocapability/capability"
)
@ -16,6 +18,8 @@ const (
// According to http://man7.org/linux/man-pages/man5/resolv.conf.5.html:
// "The search list is currently limited to six domains with a total of 256 characters."
maxDNSSearches = 6
maxLabelSize = 4096
)
func copyFile(src, dest string) error {
@ -181,3 +185,28 @@ func getOCICapabilitiesList() []string {
}
return caps
}
func recordOperation(operation string, start time.Time) {
metrics.CRIOOperations.WithLabelValues(operation).Inc()
metrics.CRIOOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start))
}
// recordError records error for metric if an error occurred.
func recordError(operation string, err error) {
if err != nil {
// TODO(runcom): handle timeout from ctx as well
metrics.CRIOOperationsErrors.WithLabelValues(operation).Inc()
}
}
func validateLabels(labels map[string]string) error {
for k, v := range labels {
if (len(k) + len(v)) > maxLabelSize {
if len(k) > 10 {
k = k[:10]
}
return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s", maxLabelSize, k)
}
}
return nil
}
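A short sketch showing the label size guard in action. checkLabel is a hypothetical standalone version of the per-pair check inside validateLabels above.

package main

import (
	"fmt"
	"strings"
)

const maxLabelSize = 4096 // same limit as above

// checkLabel rejects any single key/value pair whose combined length exceeds
// maxLabelSize, truncating the key in the error message just like the code above.
func checkLabel(k, v string) error {
	if len(k)+len(v) > maxLabelSize {
		if len(k) > 10 {
			k = k[:10]
		}
		return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s", maxLabelSize, k)
	}
	return nil
}

func main() {
	fmt.Println(checkLabel("app", "redis"))                                 // <nil>
	fmt.Println(checkLabel("averylonglabelkey", strings.Repeat("x", 5000))) // size error
}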


@ -1,29 +1,35 @@
package server
import (
"time"
"github.com/kubernetes-incubator/cri-o/version"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// Version returns the runtime name, runtime version and runtime API version
func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (*pb.VersionResponse, error) {
runtimeVersion, err := s.Runtime().Version()
if err != nil {
return nil, err
}
const (
// kubeAPIVersion is the api version of kubernetes.
// TODO: Track upstream code. For now it expects 0.1.0
version := "0.1.0"
kubeAPIVersion = "0.1.0"
// containerName is the name prepended in kubectl describe->Container ID:
// cri-o://<CONTAINER_ID>
containerName = "cri-o"
runtimeAPIVersion = "v1alpha1"
)
// taking const address
rav := runtimeAPIVersion
runtimeName := s.Runtime().Name()
// Version returns the runtime name, runtime version and runtime API version
func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (resp *pb.VersionResponse, err error) {
const operation = "version"
defer func() {
recordOperation(operation, time.Now())
recordError(operation, err)
}()
return &pb.VersionResponse{
Version: version,
RuntimeName: runtimeName,
RuntimeVersion: runtimeVersion,
RuntimeApiVersion: rav,
Version: kubeAPIVersion,
RuntimeName: containerName,
RuntimeVersion: version.Version,
RuntimeApiVersion: runtimeAPIVersion,
}, nil
}


@ -27,7 +27,6 @@ function teardown() {
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run crioctl ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -eq 0 ]
@ -60,7 +59,6 @@ function teardown() {
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run crioctl ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -ne 0 ]
@ -94,7 +92,6 @@ function teardown() {
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run crioctl ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -ne 0 ]
@ -156,7 +153,6 @@ function teardown() {
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
[ "$status" -eq 0 ]
run crioctl ctr execsync --id "$ctr_id" touch test.txt
echo "$output"
[ "$status" -eq 0 ]

12
test/command.bats Normal file
View File

@ -0,0 +1,12 @@
#!/usr/bin/env bats
load helpers
@test "crio commands" {
run ${CRIO_BINARY} --config /dev/null config > /dev/null
echo "$output"
[ "$status" -eq 0 ]
run ${CRIO_BINARY} badoption > /dev/null
echo "$output"
[ "$status" -ne 0 ]
}

View File

@ -560,6 +560,7 @@ function teardown() {
run crioctl ctr execsync --id "$ctr_id" --timeout 1 sleep 10
echo "$output"
[[ "$output" =~ "command timed out" ]]
[ "$status" -ne 0 ]
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
@ -599,6 +600,31 @@ function teardown() {
stop_crio
}
@test "ctr hostname env" {
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run crioctl ctr execsync --id "$ctr_id" env
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "HOSTNAME" ]]
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_crio
}
@test "ctr execsync failure" {
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
@ -766,10 +792,16 @@ function teardown() {
echo "$output"
[ "$status" -eq 0 ]
# Wait for container to OOM
run sleep 100
run crioctl ctr status --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
attempt=0
while [ $attempt -le 100 ]; do
attempt=$((attempt+1))
run crioctl ctr status --id "$ctr_id"
echo "$output"
if [[ "$output" =~ "OOMKilled" ]]; then
break
fi
sleep 10
done
[[ "$output" =~ "OOMKilled" ]]
run crioctl pod stop --id "$pod_id"
echo "$output"
@ -864,3 +896,59 @@ function teardown() {
cleanup_pods
stop_crio
}
@test "ctr correctly setup working directory" {
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
notexistcwd=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["working_dir"] = "/thisshouldntexistatall"; json.dump(obj, sys.stdout)')
echo "$notexistcwd" > "$TESTDIR"/container_cwd_notexist.json
run crioctl ctr create --config "$TESTDIR"/container_cwd_notexist.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
filecwd=$(cat "$TESTDATA"/container_config.json | python -c 'import json,sys;obj=json.load(sys.stdin);obj["working_dir"] = "/etc/passwd"; obj["metadata"]["name"] = "container2"; json.dump(obj, sys.stdout)')
echo "$filecwd" > "$TESTDIR"/container_cwd_file.json
run crioctl ctr create --config "$TESTDIR"/container_cwd_file.json --pod "$pod_id"
echo "$output"
[ "$status" -ne 0 ]
ctr_id="$output"
[[ "$output" =~ "not a directory" ]]
cleanup_ctrs
cleanup_pods
stop_crio
}
@test "ctr execsync conflicting with conmon env" {
start_crio
run crictl runs "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run crictl create "$pod_id" "$TESTDATA"/container_redis_env_custom.json "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run crictl start "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run crictl exec "$ctr_id" env
echo "$output"
echo "$status"
[ "$status" -eq 0 ]
[[ "$output" =~ "acustompathinpath" ]]
run crictl exec --sync "$ctr_id" env
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "acustompathinpath" ]]
cleanup_ctrs
cleanup_pods
stop_crio
}

69
test/default_mounts.bats Normal file
View File

@ -0,0 +1,69 @@
#!/usr/bin/env bats
load helpers
IMAGE="redis:alpine"
function teardown() {
cleanup_test
}
@test "bind secrets mounts to container" {
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run crioctl image pull "$IMAGE"
[ "$status" -eq 0 ]
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run crioctl ctr execsync --id "$ctr_id" cat /proc/mounts
echo "$output"
[ "$status" -eq 0 ]
mount_info="$output"
run grep /container/path1 <<< "$mount_info"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_crio
}
@test "default mounts correctly sorted with other mounts" {
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run crioctl image pull "$IMAGE"
[ "$status" -eq 0 ]
host_path="$TESTDIR"/clash
mkdir "$host_path"
echo "clashing..." > "$host_path"/clashing.txt
sed -e "s,%HPATH%,$host_path,g" "$TESTDATA"/container_redis_default_mounts.json > "$TESTDIR"/defmounts_pre.json
sed -e 's,%CPATH%,\/container\/path1\/clash,g' "$TESTDIR"/defmounts_pre.json > "$TESTDIR"/defmounts.json
run crioctl ctr create --config "$TESTDIR"/defmounts.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run crioctl ctr execsync --id "$ctr_id" ls -la /container/path1/clash
echo "$output"
[ "$status" -eq 0 ]
run crioctl ctr execsync --id "$ctr_id" cat /container/path1/clash/clashing.txt
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "clashing..." ]]
run crioctl ctr execsync --id "$ctr_id" ls -la /container/path1
echo "$output"
[ "$status" -eq 0 ]
run crioctl ctr execsync --id "$ctr_id" cat /container/path1/test.txt
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "Testing secrets mounts!" ]]
cleanup_ctrs
cleanup_pods
stop_crio
}

View File

@ -10,16 +10,16 @@ TESTDATA="${INTEGRATION_ROOT}/testdata"
CRIO_ROOT=${CRIO_ROOT:-$(cd "$INTEGRATION_ROOT/../.."; pwd -P)}
# Path of the crio binary.
CRIO_BINARY=${CRIO_BINARY:-${CRIO_ROOT}/cri-o/crio}
CRIO_BINARY=${CRIO_BINARY:-${CRIO_ROOT}/cri-o/bin/crio}
# Path of the crictl binary.
CRICTL_PATH=$(command -v crictl || true)
CRICTL_BINARY=${CRICTL_PATH:-/usr/bin/crictl}
# Path to kpod binary.
KPOD_BINARY=${KPOD_BINARY:-${CRIO_ROOT}/cri-o/kpod}
KPOD_BINARY=${KPOD_BINARY:-${CRIO_ROOT}/cri-o/bin/kpod}
# Path of the conmon binary.
CONMON_BINARY=${CONMON_BINARY:-${CRIO_ROOT}/cri-o/conmon/conmon}
CONMON_BINARY=${CONMON_BINARY:-${CRIO_ROOT}/cri-o/bin/conmon}
# Path of the pause binary.
PAUSE_BINARY=${PAUSE_BINARY:-${CRIO_ROOT}/cri-o/pause/pause}
PAUSE_BINARY=${PAUSE_BINARY:-${CRIO_ROOT}/cri-o/bin/pause}
# Path of the default seccomp profile.
SECCOMP_PROFILE=${SECCOMP_PROFILE:-${CRIO_ROOT}/cri-o/seccomp.json}
# Name of the default apparmor profile.
@ -69,6 +69,15 @@ HOOKSDIR=$TESTDIR/hooks
mkdir ${HOOKSDIR}
HOOKS_OPTS="--hooks-dir-path=$HOOKSDIR"
# Setup default secrets mounts
MOUNT_PATH="$TESTDIR/secrets"
mkdir ${MOUNT_PATH}
MOUNT_FILE="${MOUNT_PATH}/test.txt"
touch ${MOUNT_FILE}
echo "Testing secrets mounts!" > ${MOUNT_FILE}
DEFAULT_MOUNTS_OPTS="--default-mounts=${MOUNT_PATH}:/container/path1"
# We may need to set some default storage options.
case "$(stat -f -c %T ${TESTDIR})" in
aufs)
@ -165,7 +174,7 @@ function crio() {
}
# DEPRECATED
OCIC_BINARY=${OCIC_BINARY:-${CRIO_ROOT}/cri-o/crioctl}
OCIC_BINARY=${OCIC_BINARY:-${CRIO_ROOT}/cri-o/bin/crioctl}
# Run crioctl using the binary specified by $OCIC_BINARY.
function crioctl() {
"$OCIC_BINARY" --connect "$CRIO_SOCKET" "$@"
@ -202,9 +211,9 @@ function retry() {
false
}
# Waits until the given crio becomes reachable.
# Waits until crio becomes reachable.
function wait_until_reachable() {
retry 15 1 crictl status
retry 15 1 crictl version
}
# Start crio.
@ -235,7 +244,7 @@ function start_crio() {
"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/image-volume-test --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --add-name=docker.io/library/mrunalp/image-volume-test --signature-policy="$INTEGRATION_ROOT"/policy.json
"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image --add-name=docker.io/library/busybox:latest --signature-policy="$INTEGRATION_ROOT"/policy.json
"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTS --runroot "$TESTDIR/crio-run" --image-name=runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test --add-name=docker.io/runcom/stderr-test:latest --signature-policy="$INTEGRATION_ROOT"/policy.json
"$CRIO_BINARY" ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --cni-plugin-dir "$CRIO_CNI_PLUGIN" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --log-size-max "$LOG_SIZE_MAX_LIMIT" --config /dev/null config >$CRIO_CONFIG
"$CRIO_BINARY" ${DEFAULT_MOUNTS_OPTS} ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --cni-plugin-dir "$CRIO_CNI_PLUGIN" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --log-size-max "$LOG_SIZE_MAX_LIMIT" --config /dev/null config >$CRIO_CONFIG
# Prepare the CNI configuration files, we're running with non host networking by default
if [[ -n "$4" ]]; then

View File

@ -10,7 +10,7 @@ cp hooks/checkhook.sh ${HOOKSDIR}
sed "s|HOOKSDIR|${HOOKSDIR}|" hooks/checkhook.json > ${HOOKSDIR}/checkhook.json
@test "pod test hooks" {
run rm -f /run/hookscheck
rm -f /run/hookscheck
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"

View File

@ -38,6 +38,7 @@ function teardown() {
[ "$status" -eq 0 ]
run ${KPOD_BINARY} ${KPOD_OPTIONS} images --format json
echo "$output"
[ "$status" -eq 0 ]
name=$(echo $output | python -c 'import sys; import json; print(json.loads(sys.stdin.read())[0])["names"][0]')
[ "$name" = "docker.io/library/${IMAGE}" ]
run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi ${IMAGE}

View File

@ -59,6 +59,7 @@ function teardown() {
ctr_id="$output"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
id="$output"
run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "$id"
echo "$output"
@ -87,6 +88,7 @@ function teardown() {
ctr_id="$output"
run crioctl ctr start --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0"
echo "$output"
[ "$status" -eq 0 ]
@ -115,6 +117,7 @@ function teardown() {
run crioctl ctr start --id "$ctr_id"
echo "$output"
id="$output"
[ "$status" -eq 0 ]
run ${KPOD_BINARY} ${KPOD_OPTIONS} pause "$id"
echo "$output"
[ "$status" -eq 0 ]

View File

@ -167,12 +167,10 @@ IMAGE="redis:alpine"
cleanup_ctrs
cleanup_pods
stop_crio
[ "$status" -eq 0 ]
}
@test "kpod ps namespace flag" {
start_crio
[ "$status" -eq 0 ]
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
@ -215,7 +213,6 @@ IMAGE="redis:alpine"
@test "kpod ps without namespace flag and format flag = json" {
start_crio
[ "$status" -eq 0 ]
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
@ -231,7 +228,6 @@ IMAGE="redis:alpine"
cleanup_ctrs
cleanup_pods
stop_crio
[ "$status" -eq 0 ]
}
@test "kpod ps format flag = go template" {

View File

@ -19,6 +19,7 @@ function teardown() {
[ "$status" -eq 0 ]
run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
ctr_id="$output"
[ "$status" -eq 0 ]
run ${KPOD_BINARY} $KPOD_OPTIONS rename "$ctr_id" "$NEW_NAME"
echo "$output"
[ "$status" -eq 0 ]

View File

@ -30,7 +30,6 @@ function teardown() {
run ${KPOD_BINARY} ${KPOD_OPTIONS} rmi $IMAGE
[ "$status" -eq 0 ]
rm -f alpine.tar
[ "$status" -eq 0 ]
}
@test "kpod save using stdout" {

View File

@ -25,7 +25,10 @@ function teardown() {
run crioctl ctr start --id "$ctr_id"
echo "$output"
id="$output"
[ "$status" -eq 0 ]
run ${KPOD_BINARY} ${KPOD_OPTIONS} stop "$id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_pods
stop_crio
}
@ -41,8 +44,15 @@ function teardown() {
[ "$status" -eq 0 ]
ctr_id="$output"
run crioctl ctr start --id "$ctr_id"
[ "$status" -eq 0 ]
run crioctl ctr inspect --id "$ctr_id"
echo "$output"
run ${KPOD_BINARY} ${KPOD_OPTIONS} stop "k8s_podsandbox1-redis_podsandbox1_redhat.test.crio_redhat-test-crio_0"
[ "$status" -eq 0 ]
ctr_name=$(python -c 'import json; import sys; print json.load(sys.stdin)["crio_annotations"]["io.kubernetes.cri-o.Name"]' <<< "$output")
echo container name is \""$ctr_name"\"
run ${KPOD_BINARY} ${KPOD_OPTIONS} stop "$ctr_name"
echo "$output"
[ "$status" -eq 0 ]
cleanup_pods
stop_crio
}

View File

@ -34,6 +34,7 @@ function container_start() {
@test "wait on a stopped container" {
run ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest
echo $output
[ "$status" -eq 0 ]
start_crio
pod_id=$( pod_run_from_template "test" "test" "test1-1" )
echo $pod_id
@ -50,6 +51,7 @@ function container_start() {
@test "wait on a sleeping container" {
run ${KPOD_BINARY} ${KPOD_OPTIONS} pull docker.io/library/busybox:latest
echo $output
[ "$status" -eq 0 ]
start_crio
pod_id=$( pod_run_from_template "test" "test" "test1-1" )
echo $pod_id
@ -57,6 +59,7 @@ function container_start() {
echo $ctr_id
run container_start $ctr_id
echo $output
[ "$status" -eq 0 ]
run ${KPOD_BINARY} ${KPOD_OPTIONS} wait $ctr_id
echo $output
[ "$status" -eq 0 ]

View File

@ -7,6 +7,7 @@ function teardown() {
cleanup_pods
stop_crio
rm -f /var/lib/cni/networks/crionet_test_args/*
chmod 0755 $CONMON_BINARY
cleanup_test
}
@ -121,10 +122,8 @@ function teardown() {
ctr2_id="$output"
ping_pod_from_pod $ctr1_id $ctr2_id
[ "$status" -eq 0 ]
ping_pod_from_pod $ctr2_id $ctr1_id
[ "$status" -eq 0 ]
}
@test "Ensure correct CNI plugin namespace/name/container-id arguments" {
@ -165,6 +164,7 @@ function teardown() {
[ "$status" -eq 0 ]
run crioctl ctr stop --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
}
@test "Clean up network if pod sandbox fails" {
@ -172,9 +172,11 @@ function teardown() {
# make conmon non-executable to cause the sandbox setup to fail after
# networking has been configured
chmod 0644 /go/src/github.com/kubernetes-incubator/cri-o/conmon/conmon
chmod 0644 $CONMON_BINARY
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
chmod 0755 /go/src/github.com/kubernetes-incubator/cri-o/conmon/conmon
chmod 0755 $CONMON_BINARY
echo "$output"
[ "$status" -ne 0 ]
# ensure that the server cleaned up sandbox networking if the sandbox
# failed after network setup

View File

@ -0,0 +1,67 @@
{
"metadata": {
"name": "podsandbox1-redis"
},
"image": {
"image": "redis:alpine"
},
"args": [
"docker-entrypoint.sh",
"redis-server"
],
"mounts": [
{
"container_path": "%CPATH%",
"host_path": "%HPATH%"
}
],
"working_dir": "/data",
"envs": [
{
"key": "PATH",
"value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
},
{
"key": "TERM",
"value": "xterm"
},
{
"key": "REDIS_VERSION",
"value": "3.2.3"
},
{
"key": "REDIS_DOWNLOAD_URL",
"value": "http://download.redis.io/releases/redis-3.2.3.tar.gz"
},
{
"key": "REDIS_DOWNLOAD_SHA1",
"value": "92d6d93ef2efc91e595c8bf578bf72baff397507"
}
],
"labels": {
"tier": "backend"
},
"annotations": {
"pod": "podsandbox1"
},
"readonly_rootfs": false,
"log_path": "",
"stdin": false,
"stdin_once": false,
"tty": false,
"linux": {
"resources": {
"cpu_period": 10000,
"cpu_quota": 20000,
"cpu_shares": 512,
"oom_score_adj": 30
},
"security_context": {
"capabilities": {
"add_capabilities": [
"sys_admin"
]
}
}
}
}

View File

@ -0,0 +1,62 @@
{
"metadata": {
"name": "podsandbox1-redis"
},
"image": {
"image": "redis:alpine"
},
"args": [
"docker-entrypoint.sh",
"redis-server"
],
"working_dir": "/data",
"envs": [
{
"key": "PATH",
"value": "/acustompathinpath:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
},
{
"key": "TERM",
"value": "xterm"
},
{
"key": "REDIS_VERSION",
"value": "3.2.3"
},
{
"key": "REDIS_DOWNLOAD_URL",
"value": "http://download.redis.io/releases/redis-3.2.3.tar.gz"
},
{
"key": "REDIS_DOWNLOAD_SHA1",
"value": "92d6d93ef2efc91e595c8bf578bf72baff397507"
}
],
"labels": {
"tier": "backend"
},
"annotations": {
"pod": "podsandbox1"
},
"readonly_rootfs": false,
"log_path": "",
"stdin": false,
"stdin_once": false,
"tty": false,
"linux": {
"resources": {
"memory_limit_in_bytes": 209715200,
"cpu_period": 10000,
"cpu_quota": 20000,
"cpu_shares": 512,
"oom_score_adj": 30
},
"security_context": {
"capabilities": {
"add_capabilities": [
"sys_admin"
]
}
}
}
}

View File

@ -10,7 +10,7 @@ github.com/ostreedev/ostree-go master
github.com/containers/storage 64bf27465d0d1edd89e7a4ce49866fea01145782
github.com/containernetworking/cni v0.4.0
google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
github.com/opencontainers/selinux v1.0.0-rc1
github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd
github.com/opencontainers/go-digest v1.0.0-rc0
github.com/opencontainers/runtime-tools d3f7e9e9e631c7e87552d67dc7c86de33c3fb68a
github.com/opencontainers/runc 45bde006ca8c90e089894508708bcf0e2cdf9e13
@ -71,9 +71,9 @@ github.com/Microsoft/hcsshim 43f9725307998e09f2e3816c2c0c36dc98f0c982
github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46
github.com/emicklei/go-restful-swagger12 1.0.1
github.com/pkg/errors v0.8.0
github.com/godbus/dbus v4.0.0
github.com/godbus/dbus a389bdde4dd695d414e47b755e95e72b7826432c
github.com/urfave/cli v1.20.0
github.com/vbatts/tar-split v0.10.1
github.com/vbatts/tar-split v0.10.2
github.com/renstrom/dedent v1.0.0
github.com/hpcloud/tail v1.0.0
gopkg.in/fsnotify.v1 v1.4.2
@ -101,3 +101,5 @@ github.com/go-zoo/bone 031b4005dfe248ccba241a0c9de0f9e112fd6b7c
github.com/soheilhy/cmux v0.1.3
github.com/hashicorp/go-multierror 83588e72410abfbe4df460eeb6f30841ae47d4c4
github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/pmezard/go-difflib v1.0.0

View File

@ -1,60 +0,0 @@
## About
This directory contains a collection of scripts used to build and manage this
repository. If there are any issues regarding the intention of a particular
script (or even part of a certain script), please reach out to us.
It may help us either refine our current scripts, or add on new ones
that are appropriate for a given use case.
## DinD (dind.sh)
DinD is a wrapper script which allows Docker to be run inside a Docker
container. DinD requires the container to
be run with privileged mode enabled.
## Generate Authors (generate-authors.sh)
Generates AUTHORS, a file listing the names and corresponding emails of
individual contributors. AUTHORS can be found in the root of this repository.
## Make
There are two make files, each with a different extension. Neither is supposed
to be called directly; only invoke `make`. Both scripts run inside a Docker
container.
### make.ps1
- The Windows-native build script that uses PowerShell semantics; it is more limited
than `hack\make.sh`, since it does not support the full set of
operations provided by the Linux counterpart, `make.sh`. However, `make.ps1`
does provide support for local Windows development and Windows-to-Windows CI.
More information can be found within `make.ps1` itself, from its author, @jhowardmsft
### make.sh
- Referenced via `make test` when running tests on a local machine,
or directly referenced when running tests inside a Docker development container.
- When running on a local machine, use `make test` to run all tests found in
`test`, `test-unit`, `test-integration-cli`, and `test-docker-py`.
The default timeout is set in `make.sh` to 60 minutes
(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run
all of the tests.
- When running inside a Docker development container, `hack/make.sh` does
not have a single target that runs all the tests. You need to provide a
single command line with multiple targets that performs the same thing.
An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py`
- For more information related to testing outside the scope of this README,
refer to
[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/)
## Release (release.sh)
Releases any bundles built by `make` to a public AWS S3 bucket.
For information regarding configuration, please view `release.sh`.
## Vendor (vendor.sh)
A shell script that is a wrapper around Vndr. For information on how to use
this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md)

View File

@ -1,69 +0,0 @@
# Integration Testing on Swarm
IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster.
## Architecture
### Master service
- Works as a funker caller
- Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
### Worker service
- Works as a funker callee
- Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
### Client
- Controls master and workers via `docker stack`
- No need to have a local daemon
Typically, the master and workers are expected to run in a cloud environment,
while the client runs on a laptop, e.g. Docker for Mac/Windows.
## Requirement
- Docker daemon 1.13 or later
- Private registry for distributed execution with multiple nodes
## Usage
### Step 1: Prepare images
$ make build-integration-cli-on-swarm
The following environment variables are known to work in this step:
- `BUILDFLAGS`
- `DOCKER_INCREMENTAL_BINARY`
Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
### Step 2: Execute tests
$ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
The following environment variables are known to work in this step:
- `DOCKER_GRAPHDRIVER`
- `DOCKER_EXPERIMENTAL`
#### Flags
Basic flags:
- `-replicas N`: the number of worker service replicas, i.e. the degree of parallelism.
- `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
- `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`.
Experimental flags for mitigating makespan nonuniformity:
- `-shuffle`: Shuffle the test filter strings
Flags for debugging IT on Swarm itself:
- `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the timestamp is used.
- `-filters-file FILE`: the file contains `-check.f` strings. By default, the file is automatically generated.
- `-dry-run`: skip the actual workload
- `-keep-executor`: do not auto-remove executor containers; this is useful for running privileged programs on Swarm

View File

@ -1,2 +0,0 @@
# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773

View File

@ -1,3 +1,5 @@
[![Build Status](https://travis-ci.org/godbus/dbus.svg?branch=master)](https://travis-ci.org/godbus/dbus)
dbus
----
@ -29,6 +31,7 @@ gives a short overview over the basic usage.
#### Projects using godbus
- [notify](https://github.com/esiqveland/notify) provides desktop notifications over dbus as a library.
- [go-bluetooth](https://github.com/muka/go-bluetooth) provides a bluetooth client over the bluez dbus API.
Please note that the API is considered unstable for now and may change without
further notice.

143
vendor/github.com/godbus/dbus/conn.go generated vendored
View File

@ -9,8 +9,6 @@ import (
"sync"
)
const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
var (
systemBus *Conn
systemBusLck sync.Mutex
@ -47,15 +45,13 @@ type Conn struct {
calls map[uint32]*Call
callsLck sync.RWMutex
handlers map[ObjectPath]map[string]exportedObj
handlersLck sync.RWMutex
handler Handler
out chan *Message
closed bool
outLck sync.RWMutex
signals []chan<- *Signal
signalsLck sync.Mutex
signalHandler SignalHandler
eavesdropped chan<- *Message
eavesdroppedLck sync.Mutex
@ -90,16 +86,33 @@ func SessionBus() (conn *Conn, err error) {
return
}
// SessionBusPrivate returns a new private connection to the session bus.
func SessionBusPrivate() (*Conn, error) {
func getSessionBusAddress() (string, error) {
sessionEnvLck.Lock()
defer sessionEnvLck.Unlock()
address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
if address != "" && address != "autolaunch:" {
return Dial(address)
return address, nil
}
return getSessionBusPlatformAddress()
}
// SessionBusPrivate returns a new private connection to the session bus.
func SessionBusPrivate() (*Conn, error) {
address, err := getSessionBusAddress()
if err != nil {
return nil, err
}
return sessionBusPlatform()
return Dial(address)
}
// SessionBusPrivateHandler returns a new private connection to the session bus, using the provided handlers.
func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) {
address, err := getSessionBusAddress()
if err != nil {
return nil, err
}
return DialHandler(address, handler, signalHandler)
}
// SystemBus returns a shared connection to the system bus, connecting to it if
@ -133,11 +146,12 @@ func SystemBus() (conn *Conn, err error) {
// SystemBusPrivate returns a new private connection to the system bus.
func SystemBusPrivate() (*Conn, error) {
address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
if address != "" {
return Dial(address)
}
return Dial(defaultSystemBusAddress)
return Dial(getSystemBusPlatformAddress())
}
// SystemBusPrivateHandler returns a new private connection to the system bus, using the provided handlers.
func SystemBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) {
return DialHandler(getSystemBusPlatformAddress(), handler, signalHandler)
}
// Dial establishes a new private connection to the message bus specified by address.
@ -146,21 +160,36 @@ func Dial(address string) (*Conn, error) {
if err != nil {
return nil, err
}
return newConn(tr)
return newConn(tr, NewDefaultHandler(), NewDefaultSignalHandler())
}
// DialHandler establishes a new private connection to the message bus specified by address, using the supplied handlers.
func DialHandler(address string, handler Handler, signalHandler SignalHandler) (*Conn, error) {
tr, err := getTransport(address)
if err != nil {
return nil, err
}
return newConn(tr, handler, signalHandler)
}
// NewConn creates a new private *Conn from an already established connection.
func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
return newConn(genericTransport{conn})
return NewConnHandler(conn, NewDefaultHandler(), NewDefaultSignalHandler())
}
// NewConnHandler creates a new private *Conn from an already established connection, using the supplied handlers.
func NewConnHandler(conn io.ReadWriteCloser, handler Handler, signalHandler SignalHandler) (*Conn, error) {
return newConn(genericTransport{conn}, handler, signalHandler)
}
// newConn creates a new *Conn from a transport.
func newConn(tr transport) (*Conn, error) {
func newConn(tr transport, handler Handler, signalHandler SignalHandler) (*Conn, error) {
conn := new(Conn)
conn.transport = tr
conn.calls = make(map[uint32]*Call)
conn.out = make(chan *Message, 10)
conn.handlers = make(map[ObjectPath]map[string]exportedObj)
conn.handler = handler
conn.signalHandler = signalHandler
conn.nextSerial = 1
conn.serialUsed = map[uint32]bool{0: true}
conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
@ -188,16 +217,21 @@ func (conn *Conn) Close() error {
close(conn.out)
conn.closed = true
conn.outLck.Unlock()
conn.signalsLck.Lock()
for _, ch := range conn.signals {
close(ch)
if term, ok := conn.signalHandler.(Terminator); ok {
term.Terminate()
}
conn.signalsLck.Unlock()
if term, ok := conn.handler.(Terminator); ok {
term.Terminate()
}
conn.eavesdroppedLck.Lock()
if conn.eavesdropped != nil {
close(conn.eavesdropped)
}
conn.eavesdroppedLck.Unlock()
return conn.transport.Close()
}
@ -334,17 +368,7 @@ func (conn *Conn) inWorker() {
conn.namesLck.Unlock()
}
}
signal := &Signal{
Sender: sender,
Path: msg.Headers[FieldPath].value.(ObjectPath),
Name: iface + "." + member,
Body: msg.Body,
}
conn.signalsLck.Lock()
for _, ch := range conn.signals {
ch <- signal
}
conn.signalsLck.Unlock()
conn.handleSignal(msg)
case TypeMethodCall:
go conn.handleCall(msg)
}
@ -365,6 +389,21 @@ func (conn *Conn) inWorker() {
}
}
func (conn *Conn) handleSignal(msg *Message) {
iface := msg.Headers[FieldInterface].value.(string)
member := msg.Headers[FieldMember].value.(string)
// as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
// sender is optional for signals.
sender, _ := msg.Headers[FieldSender].value.(string)
signal := &Signal{
Sender: sender,
Path: msg.Headers[FieldPath].value.(ObjectPath),
Name: iface + "." + member,
Body: msg.Body,
}
conn.signalHandler.DeliverSignal(iface, member, signal)
}
// Names returns the list of all names that are currently owned by this
// connection. The slice is always at least one element long, the first element
// being the unique name of the connection.
@ -455,7 +494,19 @@ func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
// sendError creates an error message corresponding to the parameters and sends
// it to conn.out.
func (conn *Conn) sendError(e Error, dest string, serial uint32) {
func (conn *Conn) sendError(err error, dest string, serial uint32) {
var e *Error
switch em := err.(type) {
case Error:
e = &em
case *Error:
e = em
case DBusError:
name, body := em.DBusError()
e = NewError(name, body)
default:
e = MakeFailedError(err)
}
msg := new(Message)
msg.Type = TypeError
msg.serial = conn.getSerial()
@ -498,6 +549,14 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
conn.outLck.RUnlock()
}
func (conn *Conn) defaultSignalAction(fn func(h *defaultSignalHandler, ch chan<- *Signal), ch chan<- *Signal) {
if !isDefaultSignalHandler(conn.signalHandler) {
return
}
handler := conn.signalHandler.(*defaultSignalHandler)
fn(handler, ch)
}
// Signal registers the given channel to be passed all received signal messages.
// The caller has to make sure that ch is sufficiently buffered; if a message
// arrives when a write to c is not possible, it is discarded.
@ -508,22 +567,12 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
// channel for eavesdropped messages, this channel receives all signals, and
// none of the channels passed to Signal will receive any signals.
func (conn *Conn) Signal(ch chan<- *Signal) {
conn.signalsLck.Lock()
conn.signals = append(conn.signals, ch)
conn.signalsLck.Unlock()
conn.defaultSignalAction((*defaultSignalHandler).addSignal, ch)
}
// RemoveSignal removes the given channel from the list of the registered channels.
func (conn *Conn) RemoveSignal(ch chan<- *Signal) {
conn.signalsLck.Lock()
for i := len(conn.signals) - 1; i >= 0; i-- {
if ch == conn.signals[i] {
copy(conn.signals[i:], conn.signals[i+1:])
conn.signals[len(conn.signals)-1] = nil
conn.signals = conn.signals[:len(conn.signals)-1]
}
}
conn.signalsLck.Unlock()
conn.defaultSignalAction((*defaultSignalHandler).removeSignal, ch)
}
// SupportsUnixFDs returns whether the underlying transport supports passing of

View File

@ -2,20 +2,32 @@ package dbus
import (
"errors"
"fmt"
"os"
"os/exec"
)
func sessionBusPlatform() (*Conn, error) {
const defaultSystemBusAddress = "unix:path=/opt/local/var/run/dbus/system_bus_socket"
func getSessionBusPlatformAddress() (string, error) {
cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET")
b, err := cmd.CombinedOutput()
if err != nil {
return nil, err
return "", err
}
if len(b) == 0 {
return nil, errors.New("dbus: couldn't determine address of session bus")
return "", errors.New("dbus: couldn't determine address of session bus")
}
return Dial("unix:path=" + string(b[:len(b)-1]))
return "unix:path=" + string(b[:len(b)-1]), nil
}
func getSystemBusPlatformAddress() string {
address := os.Getenv("DBUS_LAUNCHD_SESSION_BUS_SOCKET")
if address != "" {
return fmt.Sprintf("unix:path=%s", address)
}
return defaultSystemBusAddress
}

View File

@ -5,27 +5,38 @@ package dbus
import (
"bytes"
"errors"
"fmt"
"os"
"os/exec"
)
func sessionBusPlatform() (*Conn, error) {
const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
func getSessionBusPlatformAddress() (string, error) {
cmd := exec.Command("dbus-launch")
b, err := cmd.CombinedOutput()
if err != nil {
return nil, err
return "", err
}
i := bytes.IndexByte(b, '=')
j := bytes.IndexByte(b, '\n')
if i == -1 || j == -1 {
return nil, errors.New("dbus: couldn't determine address of session bus")
return "", errors.New("dbus: couldn't determine address of session bus")
}
env, addr := string(b[0:i]), string(b[i+1:j])
os.Setenv(env, addr)
return Dial(addr)
return addr, nil
}
func getSystemBusPlatformAddress() string {
address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
if address != "" {
return fmt.Sprintf("unix:path=%s", address)
}
return defaultSystemBusAddress
}

315
vendor/github.com/godbus/dbus/dbus.go generated vendored
View File

@ -2,6 +2,7 @@ package dbus
import (
"errors"
"fmt"
"reflect"
"strings"
)
@ -12,6 +13,8 @@ var (
uint8Type = reflect.TypeOf(uint8(0))
int16Type = reflect.TypeOf(int16(0))
uint16Type = reflect.TypeOf(uint16(0))
intType = reflect.TypeOf(int(0))
uintType = reflect.TypeOf(uint(0))
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
int64Type = reflect.TypeOf(int64(0))
@ -22,6 +25,7 @@ var (
objectPathType = reflect.TypeOf(ObjectPath(""))
variantType = reflect.TypeOf(Variant{Signature{""}, nil})
interfacesType = reflect.TypeOf([]interface{}{})
interfaceType = reflect.TypeOf((*interface{})(nil)).Elem()
unixFDType = reflect.TypeOf(UnixFD(0))
unixFDIndexType = reflect.TypeOf(UnixFDIndex(0))
)
@ -46,86 +50,251 @@ func Store(src []interface{}, dest ...interface{}) error {
}
for i := range src {
if err := store(src[i], dest[i]); err != nil {
if err := storeInterfaces(src[i], dest[i]); err != nil {
return err
}
}
return nil
}
func store(src, dest interface{}) error {
if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) {
reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src))
return nil
} else if hasStruct(dest) {
rv := reflect.ValueOf(dest).Elem()
switch rv.Kind() {
case reflect.Struct:
vs, ok := src.([]interface{})
if !ok {
return errors.New("dbus.Store: type mismatch")
}
t := rv.Type()
ndest := make([]interface{}, 0, rv.NumField())
for i := 0; i < rv.NumField(); i++ {
field := t.Field(i)
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
ndest = append(ndest, rv.Field(i).Addr().Interface())
}
}
if len(vs) != len(ndest) {
return errors.New("dbus.Store: type mismatch")
}
err := Store(vs, ndest...)
if err != nil {
return errors.New("dbus.Store: type mismatch")
}
case reflect.Slice:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Slice {
return errors.New("dbus.Store: type mismatch")
}
rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
for i := 0; i < sv.Len(); i++ {
if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil {
return err
}
}
case reflect.Map:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Map {
return errors.New("dbus.Store: type mismatch")
}
keys := sv.MapKeys()
rv.Set(reflect.MakeMap(sv.Type()))
for _, key := range keys {
v := reflect.New(sv.Type().Elem())
if err := store(v, sv.MapIndex(key).Interface()); err != nil {
return err
}
rv.SetMapIndex(key, v.Elem())
}
default:
return errors.New("dbus.Store: type mismatch")
}
return nil
} else {
return errors.New("dbus.Store: type mismatch")
func storeInterfaces(src, dest interface{}) error {
return store(reflect.ValueOf(dest), reflect.ValueOf(src))
}
func store(dest, src reflect.Value) error {
if dest.Kind() == reflect.Ptr {
return store(dest.Elem(), src)
}
switch src.Kind() {
case reflect.Slice:
return storeSlice(dest, src)
case reflect.Map:
return storeMap(dest, src)
default:
return storeBase(dest, src)
}
}
func hasStruct(v interface{}) bool {
t := reflect.TypeOf(v)
for {
switch t.Kind() {
case reflect.Struct:
return true
case reflect.Slice, reflect.Ptr, reflect.Map:
t = t.Elem()
default:
return false
func storeBase(dest, src reflect.Value) error {
return setDest(dest, src)
}
func setDest(dest, src reflect.Value) error {
if !isVariant(src.Type()) && isVariant(dest.Type()) {
//special conversion for dbus.Variant
dest.Set(reflect.ValueOf(MakeVariant(src.Interface())))
return nil
}
if isVariant(src.Type()) && !isVariant(dest.Type()) {
src = getVariantValue(src)
}
if !src.Type().ConvertibleTo(dest.Type()) {
return fmt.Errorf(
"dbus.Store: type mismatch: cannot convert %s to %s",
src.Type(), dest.Type())
}
dest.Set(src.Convert(dest.Type()))
return nil
}
func kindsAreCompatible(dest, src reflect.Type) bool {
switch {
case isVariant(dest):
return true
case dest.Kind() == reflect.Interface:
return true
default:
return dest.Kind() == src.Kind()
}
}
func isConvertibleTo(dest, src reflect.Type) bool {
switch {
case isVariant(dest):
return true
case dest.Kind() == reflect.Interface:
return true
case dest.Kind() == reflect.Slice:
return src.Kind() == reflect.Slice &&
isConvertibleTo(dest.Elem(), src.Elem())
case dest.Kind() == reflect.Struct:
return src == interfacesType
default:
return src.ConvertibleTo(dest)
}
}
func storeMap(dest, src reflect.Value) error {
switch {
case !kindsAreCompatible(dest.Type(), src.Type()):
return fmt.Errorf(
"dbus.Store: type mismatch: "+
"map: cannot store a value of %s into %s",
src.Type(), dest.Type())
case isVariant(dest.Type()):
return storeMapIntoVariant(dest, src)
case dest.Kind() == reflect.Interface:
return storeMapIntoInterface(dest, src)
case isConvertibleTo(dest.Type().Key(), src.Type().Key()) &&
isConvertibleTo(dest.Type().Elem(), src.Type().Elem()):
return storeMapIntoMap(dest, src)
default:
return fmt.Errorf(
"dbus.Store: type mismatch: "+
"map: cannot convert a value of %s into %s",
src.Type(), dest.Type())
}
}
func storeMapIntoVariant(dest, src reflect.Value) error {
dv := reflect.MakeMap(src.Type())
err := store(dv, src)
if err != nil {
return err
}
return storeBase(dest, dv)
}
func storeMapIntoInterface(dest, src reflect.Value) error {
var dv reflect.Value
if isVariant(src.Type().Elem()) {
//Convert variants to interface{} recursively when converting
//to interface{}
dv = reflect.MakeMap(
reflect.MapOf(src.Type().Key(), interfaceType))
} else {
dv = reflect.MakeMap(src.Type())
}
err := store(dv, src)
if err != nil {
return err
}
return storeBase(dest, dv)
}
func storeMapIntoMap(dest, src reflect.Value) error {
if dest.IsNil() {
dest.Set(reflect.MakeMap(dest.Type()))
}
keys := src.MapKeys()
for _, key := range keys {
dkey := key.Convert(dest.Type().Key())
dval := reflect.New(dest.Type().Elem()).Elem()
err := store(dval, getVariantValue(src.MapIndex(key)))
if err != nil {
return err
}
dest.SetMapIndex(dkey, dval)
}
return nil
}
func storeSlice(dest, src reflect.Value) error {
switch {
case src.Type() == interfacesType && dest.Kind() == reflect.Struct:
//The decoder always decodes structs as slices of interface{}
return storeStruct(dest, src)
case !kindsAreCompatible(dest.Type(), src.Type()):
return fmt.Errorf(
"dbus.Store: type mismatch: "+
"slice: cannot store a value of %s into %s",
src.Type(), dest.Type())
case isVariant(dest.Type()):
return storeSliceIntoVariant(dest, src)
case dest.Kind() == reflect.Interface:
return storeSliceIntoInterface(dest, src)
case isConvertibleTo(dest.Type().Elem(), src.Type().Elem()):
return storeSliceIntoSlice(dest, src)
default:
return fmt.Errorf(
"dbus.Store: type mismatch: "+
"slice: cannot convert a value of %s into %s",
src.Type(), dest.Type())
}
}
func storeStruct(dest, src reflect.Value) error {
if isVariant(dest.Type()) {
return storeBase(dest, src)
}
dval := make([]interface{}, 0, dest.NumField())
dtype := dest.Type()
for i := 0; i < dest.NumField(); i++ {
field := dest.Field(i)
ftype := dtype.Field(i)
if ftype.PkgPath != "" {
continue
}
if ftype.Tag.Get("dbus") == "-" {
continue
}
dval = append(dval, field.Addr().Interface())
}
if src.Len() != len(dval) {
return fmt.Errorf(
"dbus.Store: type mismatch: "+
"destination struct does not have "+
"enough fields need: %d have: %d",
src.Len(), len(dval))
}
return Store(src.Interface().([]interface{}), dval...)
}
func storeSliceIntoVariant(dest, src reflect.Value) error {
dv := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
err := store(dv, src)
if err != nil {
return err
}
return storeBase(dest, dv)
}
func storeSliceIntoInterface(dest, src reflect.Value) error {
var dv reflect.Value
if isVariant(src.Type().Elem()) {
//Convert variants to interface{} recursively when converting
//to interface{}
dv = reflect.MakeSlice(reflect.SliceOf(interfaceType),
src.Len(), src.Cap())
} else {
dv = reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
}
err := store(dv, src)
if err != nil {
return err
}
return storeBase(dest, dv)
}
func storeSliceIntoSlice(dest, src reflect.Value) error {
if dest.IsNil() || dest.Len() < src.Len() {
dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap()))
}
if dest.Len() != src.Len() {
return fmt.Errorf(
"dbus.Store: type mismatch: "+
"slices are different lengths "+
"need: %d have: %d",
src.Len(), dest.Len())
}
for i := 0; i < src.Len(); i++ {
err := store(dest.Index(i), getVariantValue(src.Index(i)))
if err != nil {
return err
}
}
return nil
}
func getVariantValue(in reflect.Value) reflect.Value {
if isVariant(in.Type()) {
return reflect.ValueOf(in.Interface().(Variant).Value())
}
return in
}
func isVariant(t reflect.Type) bool {
return t == variantType
}
// An ObjectPath is an object path as defined by the D-Bus spec.
@ -177,15 +346,15 @@ func alignment(t reflect.Type) int {
return 4
case signatureType:
return 1
case interfacesType: // sometimes used for structs
return 8
case interfacesType:
return 4
}
switch t.Kind() {
case reflect.Uint8:
return 1
case reflect.Uint16, reflect.Int16:
return 2
case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
case reflect.Uint, reflect.Int, reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
return 4
case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct:
return 8
@ -200,7 +369,7 @@ func isKeyType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64,
reflect.String:
reflect.String, reflect.Uint, reflect.Int:
return true
}

291
vendor/github.com/godbus/dbus/default_handler.go generated vendored Normal file
View File

@ -0,0 +1,291 @@
package dbus
import (
"bytes"
"reflect"
"strings"
"sync"
)
func newIntrospectIntf(h *defaultHandler) *exportedIntf {
methods := make(map[string]Method)
methods["Introspect"] = exportedMethod{
reflect.ValueOf(func(msg Message) (string, *Error) {
path := msg.Headers[FieldPath].value.(ObjectPath)
return h.introspectPath(path), nil
}),
}
return newExportedIntf(methods, true)
}
//NewDefaultHandler returns an instance of the default
//call handler. This is useful if you want to implement only
//one of the two handlers but not both.
func NewDefaultHandler() *defaultHandler {
h := &defaultHandler{
objects: make(map[ObjectPath]*exportedObj),
defaultIntf: make(map[string]*exportedIntf),
}
h.defaultIntf["org.freedesktop.DBus.Introspectable"] = newIntrospectIntf(h)
return h
}
type defaultHandler struct {
sync.RWMutex
objects map[ObjectPath]*exportedObj
defaultIntf map[string]*exportedIntf
}
func (h *defaultHandler) PathExists(path ObjectPath) bool {
_, ok := h.objects[path]
return ok
}
func (h *defaultHandler) introspectPath(path ObjectPath) string {
subpath := make(map[string]struct{})
var xml bytes.Buffer
xml.WriteString("<node>")
for obj, _ := range h.objects {
p := string(path)
if p != "/" {
p += "/"
}
if strings.HasPrefix(string(obj), p) {
node_name := strings.Split(string(obj[len(p):]), "/")[0]
subpath[node_name] = struct{}{}
}
}
for s, _ := range subpath {
xml.WriteString("\n\t<node name=\"" + s + "\"/>")
}
xml.WriteString("\n</node>")
return xml.String()
}
func (h *defaultHandler) LookupObject(path ObjectPath) (ServerObject, bool) {
h.RLock()
defer h.RUnlock()
object, ok := h.objects[path]
if ok {
return object, ok
}
// If an object wasn't found for this exact path,
// look for a matching subtree registration
subtreeObject := newExportedObject()
path = path[:strings.LastIndex(string(path), "/")]
for len(path) > 0 {
object, ok = h.objects[path]
if ok {
for name, iface := range object.interfaces {
// Only include this handler if it registered for the subtree
if iface.isFallbackInterface() {
subtreeObject.interfaces[name] = iface
}
}
break
}
path = path[:strings.LastIndex(string(path), "/")]
}
for name, intf := range h.defaultIntf {
if _, exists := subtreeObject.interfaces[name]; exists {
continue
}
subtreeObject.interfaces[name] = intf
}
return subtreeObject, true
}
func (h *defaultHandler) AddObject(path ObjectPath, object *exportedObj) {
h.Lock()
h.objects[path] = object
h.Unlock()
}
func (h *defaultHandler) DeleteObject(path ObjectPath) {
h.Lock()
delete(h.objects, path)
h.Unlock()
}
type exportedMethod struct {
reflect.Value
}
func (m exportedMethod) Call(args ...interface{}) ([]interface{}, error) {
t := m.Type()
params := make([]reflect.Value, len(args))
for i := 0; i < len(args); i++ {
params[i] = reflect.ValueOf(args[i]).Elem()
}
ret := m.Value.Call(params)
err := ret[t.NumOut()-1].Interface().(*Error)
ret = ret[:t.NumOut()-1]
out := make([]interface{}, len(ret))
for i, val := range ret {
out[i] = val.Interface()
}
if err == nil {
//concrete type to interface nil is a special case
return out, nil
}
return out, err
}
func (m exportedMethod) NumArguments() int {
return m.Value.Type().NumIn()
}
func (m exportedMethod) ArgumentValue(i int) interface{} {
return reflect.Zero(m.Type().In(i)).Interface()
}
func (m exportedMethod) NumReturns() int {
return m.Value.Type().NumOut()
}
func (m exportedMethod) ReturnValue(i int) interface{} {
return reflect.Zero(m.Type().Out(i)).Interface()
}
func newExportedObject() *exportedObj {
return &exportedObj{
interfaces: make(map[string]*exportedIntf),
}
}
type exportedObj struct {
interfaces map[string]*exportedIntf
}
func (obj *exportedObj) LookupInterface(name string) (Interface, bool) {
if name == "" {
return obj, true
}
intf, exists := obj.interfaces[name]
return intf, exists
}
func (obj *exportedObj) AddInterface(name string, iface *exportedIntf) {
obj.interfaces[name] = iface
}
func (obj *exportedObj) DeleteInterface(name string) {
delete(obj.interfaces, name)
}
func (obj *exportedObj) LookupMethod(name string) (Method, bool) {
for _, intf := range obj.interfaces {
method, exists := intf.LookupMethod(name)
if exists {
return method, exists
}
}
return nil, false
}
func (obj *exportedObj) isFallbackInterface() bool {
return false
}
func newExportedIntf(methods map[string]Method, includeSubtree bool) *exportedIntf {
return &exportedIntf{
methods: methods,
includeSubtree: includeSubtree,
}
}
type exportedIntf struct {
methods map[string]Method
// Whether or not this export is for the entire subtree
includeSubtree bool
}
func (obj *exportedIntf) LookupMethod(name string) (Method, bool) {
out, exists := obj.methods[name]
return out, exists
}
func (obj *exportedIntf) isFallbackInterface() bool {
return obj.includeSubtree
}
//NewDefaultSignalHandler returns an instance of the default
//signal handler. This is useful if you want to implement only
//one of the two handlers but not both.
func NewDefaultSignalHandler() *defaultSignalHandler {
return &defaultSignalHandler{}
}
func isDefaultSignalHandler(handler SignalHandler) bool {
_, ok := handler.(*defaultSignalHandler)
return ok
}
type defaultSignalHandler struct {
sync.RWMutex
closed bool
signals []chan<- *Signal
}
func (sh *defaultSignalHandler) DeliverSignal(intf, name string, signal *Signal) {
go func() {
sh.RLock()
defer sh.RUnlock()
if sh.closed {
return
}
for _, ch := range sh.signals {
ch <- signal
}
}()
}
func (sh *defaultSignalHandler) Init() error {
sh.Lock()
sh.signals = make([]chan<- *Signal, 0)
sh.Unlock()
return nil
}
func (sh *defaultSignalHandler) Terminate() {
sh.Lock()
sh.closed = true
for _, ch := range sh.signals {
close(ch)
}
sh.signals = nil
sh.Unlock()
}
func (sh *defaultSignalHandler) addSignal(ch chan<- *Signal) {
sh.Lock()
defer sh.Unlock()
if sh.closed {
return
}
sh.signals = append(sh.signals, ch)
}
func (sh *defaultSignalHandler) removeSignal(ch chan<- *Signal) {
sh.Lock()
defer sh.Unlock()
if sh.closed {
return
}
for i := len(sh.signals) - 1; i >= 0; i-- {
if ch == sh.signals[i] {
copy(sh.signals[i:], sh.signals[i+1:])
sh.signals[len(sh.signals)-1] = nil
sh.signals = sh.signals[:len(sh.signals)-1]
}
}
}
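A small usage sketch (not from this change) of the handler-aware constructors introduced above; the auth/hello sequence follows the usual godbus flow for private connections, and the standalone program is an assumption for illustration:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// Open a private system-bus connection with explicit handlers.
	conn, err := dbus.SystemBusPrivateHandler(dbus.NewDefaultHandler(), dbus.NewDefaultSignalHandler())
	if err != nil {
		fmt.Println("connect:", err)
		return
	}
	defer conn.Close()

	if err := conn.Auth(nil); err != nil {
		fmt.Println("auth:", err)
		return
	}
	if err := conn.Hello(); err != nil {
		fmt.Println("hello:", err)
		return
	}

	// Signals are routed through the default signal handler registered above.
	sig := make(chan *dbus.Signal, 16)
	conn.Signal(sig)
	fmt.Println("connected as", conn.Names()[0])
}
```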

View File

@ -19,6 +19,8 @@ respective D-Bus equivalents:
bool | BOOLEAN
int16 | INT16
uint16 | UINT16
int | INT32
uint | UINT32
int32 | INT32
uint32 | UINT32
int64 | INT64
@ -28,6 +30,7 @@ respective D-Bus equivalents:
ObjectPath | OBJECT_PATH
Signature | SIGNATURE
Variant | VARIANT
interface{} | VARIANT
UnixFDIndex | UNIX_FD
Slices and arrays encode as ARRAYs of their element type.
@ -41,6 +44,9 @@ be skipped.
Pointers encode as the value they're pointed to.
Types convertible to one of the base types above will be mapped as the
base type.
Trying to encode any other type or a slice, map or struct containing an
unsupported type will result in an InvalidTypeError.
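A minimal sketch of what the reworked Store above enables; the standalone program is illustrative only and assumes the vendored godbus API as updated here:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// The decoder hands D-Bus bodies over as []interface{}; with the Store
	// rework, plain int destinations (and interface{}, treated as VARIANT)
	// are accepted alongside the exact wire types.
	src := []interface{}{int32(42), "answer"}

	var n int
	var s string
	if err := dbus.Store(src, &n, &s); err != nil {
		fmt.Println("store:", err)
		return
	}
	fmt.Println(n, s) // 42 answer
}
```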

View File

@ -96,10 +96,10 @@ func (enc *encoder) encode(v reflect.Value, depth int) {
case reflect.Uint16:
enc.binwrite(uint16(v.Uint()))
enc.pos += 2
case reflect.Int32:
case reflect.Int, reflect.Int32:
enc.binwrite(int32(v.Int()))
enc.pos += 4
case reflect.Uint32:
case reflect.Uint, reflect.Uint32:
enc.binwrite(uint32(v.Uint()))
enc.pos += 4
case reflect.Int64:
@ -202,6 +202,8 @@ func (enc *encoder) encode(v reflect.Value, depth int) {
panic(err)
}
enc.pos += length
case reflect.Interface:
enc.encode(reflect.ValueOf(MakeVariant(v.Interface())), depth)
default:
panic(InvalidTypeError{v.Type()})
}

View File

@ -1,7 +1,6 @@
package dbus
import (
"bytes"
"errors"
"fmt"
"reflect"
@ -9,32 +8,29 @@ import (
)
var (
errmsgInvalidArg = Error{
ErrMsgInvalidArg = Error{
"org.freedesktop.DBus.Error.InvalidArgs",
[]interface{}{"Invalid type / number of args"},
}
errmsgNoObject = Error{
ErrMsgNoObject = Error{
"org.freedesktop.DBus.Error.NoSuchObject",
[]interface{}{"No such object"},
}
errmsgUnknownMethod = Error{
ErrMsgUnknownMethod = Error{
"org.freedesktop.DBus.Error.UnknownMethod",
[]interface{}{"Unknown / invalid method"},
}
ErrMsgUnknownInterface = Error{
"org.freedesktop.DBus.Error.UnknownInterface",
[]interface{}{"Object does not implement the interface"},
}
)
// exportedObj represents an exported object. It stores a precomputed
// method table that represents the methods exported on the bus.
type exportedObj struct {
methods map[string]reflect.Value
// Whether or not this export is for the entire subtree
includeSubtree bool
}
func (obj exportedObj) Method(name string) (reflect.Value, bool) {
out, exists := obj.methods[name]
return out, exists
func MakeFailedError(err error) *Error {
return &Error{
"org.freedesktop.DBus.Error.Failed",
[]interface{}{err.Error()},
}
}
// Sender is a type which can be used in exported methods to receive the message
@ -63,7 +59,7 @@ func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Va
// only track valid methods must return *Error as last arg
// and must be exported
if t.NumOut() == 0 ||
t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) ||
t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) ||
methtype.PkgPath != "" {
continue
}
@ -73,119 +69,12 @@ func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Va
return methods
}
// searchHandlers will look through all registered handlers looking for one
// to handle the given path. If a verbatim one isn't found, it will check for
// a subtree registration for the path as well.
func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportedObj, bool) {
conn.handlersLck.RLock()
defer conn.handlersLck.RUnlock()
func standardMethodArgumentDecode(m Method, sender string, msg *Message, body []interface{}) ([]interface{}, error) {
pointers := make([]interface{}, m.NumArguments())
decode := make([]interface{}, 0, len(body))
handlers, ok := conn.handlers[path]
if ok {
return handlers, ok
}
// If handlers weren't found for this exact path, look for a matching subtree
// registration
handlers = make(map[string]exportedObj)
path = path[:strings.LastIndex(string(path), "/")]
for len(path) > 0 {
var subtreeHandlers map[string]exportedObj
subtreeHandlers, ok = conn.handlers[path]
if ok {
for iface, handler := range subtreeHandlers {
// Only include this handler if it registered for the subtree
if handler.includeSubtree {
handlers[iface] = handler
}
}
break
}
path = path[:strings.LastIndex(string(path), "/")]
}
return handlers, ok
}
// handleCall handles the given method call (i.e. looks if it's one of the
// pre-implemented ones and searches for a corresponding handler if not).
func (conn *Conn) handleCall(msg *Message) {
name := msg.Headers[FieldMember].value.(string)
path := msg.Headers[FieldPath].value.(ObjectPath)
ifaceName, hasIface := msg.Headers[FieldInterface].value.(string)
sender, hasSender := msg.Headers[FieldSender].value.(string)
serial := msg.serial
if ifaceName == "org.freedesktop.DBus.Peer" {
switch name {
case "Ping":
conn.sendReply(sender, serial)
case "GetMachineId":
conn.sendReply(sender, serial, conn.uuid)
default:
conn.sendError(errmsgUnknownMethod, sender, serial)
}
return
} else if ifaceName == "org.freedesktop.DBus.Introspectable" && name == "Introspect" {
if _, ok := conn.handlers[path]; !ok {
subpath := make(map[string]struct{})
var xml bytes.Buffer
xml.WriteString("<node>")
for h, _ := range conn.handlers {
p := string(path)
if p != "/" {
p += "/"
}
if strings.HasPrefix(string(h), p) {
node_name := strings.Split(string(h[len(p):]), "/")[0]
subpath[node_name] = struct{}{}
}
}
for s, _ := range subpath {
xml.WriteString("\n\t<node name=\"" + s + "\"/>")
}
xml.WriteString("\n</node>")
conn.sendReply(sender, serial, xml.String())
return
}
}
if len(name) == 0 {
conn.sendError(errmsgUnknownMethod, sender, serial)
}
// Find the exported handler (if any) for this path
handlers, ok := conn.searchHandlers(path)
if !ok {
conn.sendError(errmsgNoObject, sender, serial)
return
}
var m reflect.Value
var exists bool
if hasIface {
iface := handlers[ifaceName]
m, exists = iface.Method(name)
} else {
for _, v := range handlers {
m, exists = v.Method(name)
if exists {
break
}
}
}
if !exists {
conn.sendError(errmsgUnknownMethod, sender, serial)
return
}
t := m.Type()
vs := msg.Body
pointers := make([]interface{}, t.NumIn())
decode := make([]interface{}, 0, len(vs))
for i := 0; i < t.NumIn(); i++ {
tp := t.In(i)
for i := 0; i < m.NumArguments(); i++ {
tp := reflect.TypeOf(m.ArgumentValue(i))
val := reflect.New(tp)
pointers[i] = val.Interface()
if tp == reflect.TypeOf((*Sender)(nil)).Elem() {
@ -197,26 +86,73 @@ func (conn *Conn) handleCall(msg *Message) {
}
}
if len(decode) != len(vs) {
conn.sendError(errmsgInvalidArg, sender, serial)
if len(decode) != len(body) {
return nil, ErrMsgInvalidArg
}
if err := Store(body, decode...); err != nil {
return nil, ErrMsgInvalidArg
}
return pointers, nil
}
func (conn *Conn) decodeArguments(m Method, sender string, msg *Message) ([]interface{}, error) {
if decoder, ok := m.(ArgumentDecoder); ok {
return decoder.DecodeArguments(conn, sender, msg, msg.Body)
}
return standardMethodArgumentDecode(m, sender, msg, msg.Body)
}
// handleCall handles the given method call (i.e. looks if it's one of the
// pre-implemented ones and searches for a corresponding handler if not).
func (conn *Conn) handleCall(msg *Message) {
name := msg.Headers[FieldMember].value.(string)
path := msg.Headers[FieldPath].value.(ObjectPath)
ifaceName, _ := msg.Headers[FieldInterface].value.(string)
sender, hasSender := msg.Headers[FieldSender].value.(string)
serial := msg.serial
if ifaceName == "org.freedesktop.DBus.Peer" {
switch name {
case "Ping":
conn.sendReply(sender, serial)
case "GetMachineId":
conn.sendReply(sender, serial, conn.uuid)
default:
conn.sendError(ErrMsgUnknownMethod, sender, serial)
}
return
}
if len(name) == 0 {
conn.sendError(ErrMsgUnknownMethod, sender, serial)
}
object, ok := conn.handler.LookupObject(path)
if !ok {
conn.sendError(ErrMsgNoObject, sender, serial)
return
}
if err := Store(vs, decode...); err != nil {
conn.sendError(errmsgInvalidArg, sender, serial)
iface, exists := object.LookupInterface(ifaceName)
if !exists {
conn.sendError(ErrMsgUnknownInterface, sender, serial)
return
}
// Extract parameters
params := make([]reflect.Value, len(pointers))
for i := 0; i < len(pointers); i++ {
params[i] = reflect.ValueOf(pointers[i]).Elem()
m, exists := iface.LookupMethod(name)
if !exists {
conn.sendError(ErrMsgUnknownMethod, sender, serial)
return
}
args, err := conn.decodeArguments(m, sender, msg)
if err != nil {
conn.sendError(err, sender, serial)
return
}
// Call method
ret := m.Call(params)
if em := ret[t.NumOut()-1].Interface().(*Error); em != nil {
conn.sendError(*em, sender, serial)
ret, err := m.Call(args...)
if err != nil {
conn.sendError(err, sender, serial)
return
}
@@ -229,13 +165,11 @@ func (conn *Conn) handleCall(msg *Message) {
reply.Headers[FieldDestination] = msg.Headers[FieldSender]
}
reply.Headers[FieldReplySerial] = MakeVariant(msg.serial)
reply.Body = make([]interface{}, len(ret)-1)
for i := 0; i < len(ret)-1; i++ {
reply.Body[i] = ret[i].Interface()
}
if len(ret) != 1 {
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
reply.Body = make([]interface{}, len(ret))
for i := 0; i < len(ret); i++ {
reply.Body[i] = ret[i]
}
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
conn.outLck.RLock()
if !conn.closed {
conn.out <- reply
@@ -375,7 +309,7 @@ func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectP
t := rval.Type()
// only track valid methods: they must return *Error as the last arg
if t.NumOut() == 0 ||
t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) {
t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) {
continue
}
out[name] = rval
@@ -383,38 +317,49 @@ func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectP
return conn.export(out, path, iface, includeSubtree)
}
func (conn *Conn) unexport(h *defaultHandler, path ObjectPath, iface string) error {
if h.PathExists(path) {
obj := h.objects[path]
obj.DeleteInterface(iface)
if len(obj.interfaces) == 0 {
h.DeleteObject(path)
}
}
return nil
}
// export is the worker function for all exports/registrations.
func (conn *Conn) export(methods map[string]reflect.Value, path ObjectPath, iface string, includeSubtree bool) error {
h, ok := conn.handler.(*defaultHandler)
if !ok {
return fmt.Errorf(
`dbus: export only allowed on the default handler, have %T`,
conn.handler)
}
if !path.IsValid() {
return fmt.Errorf(`dbus: Invalid path name: "%s"`, path)
}
conn.handlersLck.Lock()
defer conn.handlersLck.Unlock()
// Remove a previous export if the interface is nil
if methods == nil {
if _, ok := conn.handlers[path]; ok {
delete(conn.handlers[path], iface)
if len(conn.handlers[path]) == 0 {
delete(conn.handlers, path)
}
}
return nil
return conn.unexport(h, path, iface)
}
// If this is the first handler for this path, make a new map to hold all
// handlers for this path.
if _, ok := conn.handlers[path]; !ok {
conn.handlers[path] = make(map[string]exportedObj)
if !h.PathExists(path) {
h.AddObject(path, newExportedObject())
}
exportedMethods := make(map[string]Method)
for name, method := range methods {
exportedMethods[name] = exportedMethod{method}
}
// Finally, save this handler
conn.handlers[path][iface] = exportedObj{
methods: methods,
includeSubtree: includeSubtree,
}
obj := h.objects[path]
obj.AddInterface(iface, newExportedIntf(exportedMethods, includeSubtree))
return nil
}
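For orientation, here is a minimal sketch of how the export path above is typically driven from user code via the public Export wrapper; the demoServer type, its Hello method, the object path, and the interface name are illustrative only, and the sketch assumes a reachable session bus.

package main

import (
    "fmt"

    "github.com/godbus/dbus"
)

type demoServer struct{}

// Hello follows the rule enforced by the export code above: the last
// return value must be *dbus.Error.
func (demoServer) Hello(name string) (string, *dbus.Error) {
    return "hello " + name, nil
}

func main() {
    conn, err := dbus.SessionBus()
    if err != nil {
        panic(err)
    }
    // Export registers the methods of demoServer on the default handler
    // under the given path and interface. A real service would usually
    // also request a well-known bus name.
    if err := conn.Export(demoServer{}, "/com/example/Demo", "com.example.Demo"); err != nil {
        panic(err)
    }
    fmt.Println("serving com.example.Demo at /com/example/Demo")
    select {}
}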


@@ -43,7 +43,8 @@ func (o *Object) AddMatchSignal(iface, member string) *Call {
// will be allocated. Otherwise, ch has to be buffered or Go will panic.
//
// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure
// is returned of which only the Err member is valid.
// is returned with any error in Err and a closed channel in Done containing
// the returned Call as its only entry.
//
// If the method parameter contains a dot ('.'), the part before the last dot
// specifies the interface on which the method is called.
@@ -97,11 +98,21 @@ func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface
}
o.conn.outLck.RLock()
defer o.conn.outLck.RUnlock()
done := make(chan *Call, 1)
call := &Call{
Err: nil,
Done: done,
}
defer func() {
call.Done <- call
close(done)
}()
if o.conn.closed {
return &Call{Err: ErrClosed}
call.Err = ErrClosed
return call
}
o.conn.out <- msg
return &Call{Err: nil}
return call
}
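A small sketch of the asynchronous pattern the updated doc comment above describes, assuming a reachable session bus; with this change the returned Call's Done channel always ends up delivering the finished Call, including on the error path.

package main

import (
    "fmt"

    "github.com/godbus/dbus"
)

func main() {
    conn, err := dbus.SessionBus()
    if err != nil {
        panic(err)
    }
    obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")

    // Go returns immediately; the finished *Call arrives on Done.
    call := <-obj.Go("org.freedesktop.DBus.ListNames", 0, make(chan *dbus.Call, 1)).Done
    if call.Err != nil {
        panic(call.Err)
    }

    var names []string
    if err := call.Store(&names); err != nil {
        panic(err)
    }
    fmt.Println(len(names), "names on the bus")
}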
// GetProperty calls org.freedesktop.DBus.Properties.GetProperty on the given
@@ -125,12 +136,12 @@ func (o *Object) GetProperty(p string) (Variant, error) {
return result, nil
}
// Destination returns the destination that calls on o are sent to.
// Destination returns the destination that calls on (o *Object) are sent to.
func (o *Object) Destination() string {
return o.dest
}
// Path returns the path that calls on o are sent to.
// Path returns the path that calls on (o *Object) are sent to.
func (o *Object) Path() ObjectPath {
return o.path
}
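And a short sketch of GetProperty alongside Destination and Path; the property queried here (org.freedesktop.DBus.Interfaces) is only an example and is not exposed by every bus daemon.

package main

import (
    "fmt"

    "github.com/godbus/dbus"
)

func main() {
    conn, err := dbus.SessionBus()
    if err != nil {
        panic(err)
    }
    obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
    fmt.Println("destination:", obj.Destination(), "path:", obj.Path())

    // GetProperty takes the "interface.property" form and returns a Variant.
    v, err := obj.GetProperty("org.freedesktop.DBus.Interfaces")
    if err != nil {
        fmt.Println("property not exposed by this bus daemon:", err)
        return
    }
    fmt.Println("Interfaces:", v.Value())
}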

vendor/github.com/godbus/dbus/server_interfaces.go (new vendored file, 89 additions)

@@ -0,0 +1,89 @@
package dbus
// Terminator allows a handler to implement a shutdown mechanism that
// is called when the connection terminates.
type Terminator interface {
Terminate()
}
// Handler is the representation of a D-Bus Application.
//
// The Handler must have a way to look up objects given
// an ObjectPath. The returned object must implement the
// ServerObject interface.
type Handler interface {
LookupObject(path ObjectPath) (ServerObject, bool)
}
// ServerObject is the representation of a D-Bus Object.
//
// Objects are registered at a path for a given Handler.
// The Objects implement D-Bus interfaces. The semantics
// of interface lookup are up to the implementation of
// the ServerObject. Per the D-Bus specification, the
// ServerObject implementation may choose to treat the
// empty string as a valid interface representing all
// methods, or not.
type ServerObject interface {
LookupInterface(name string) (Interface, bool)
}
// An Interface is the representation of a D-Bus Interface.
//
// Interfaces are a grouping of methods implemented by the Objects.
// Interfaces are responsible for routing method calls.
type Interface interface {
LookupMethod(name string) (Method, bool)
}
// A Method represents the exposed methods on D-Bus.
type Method interface {
// Call requires that all arguments are decoded before being passed to it.
Call(args ...interface{}) ([]interface{}, error)
NumArguments() int
NumReturns() int
// ArgumentValue returns a representative value for the argument at the given
// position. It should be of the proper type; reflect.Zero is a good mechanism
// for obtaining such a value.
ArgumentValue(position int) interface{}
// ReturnValue returns a representative value for the return value at the given
// position. It should be of the proper type; reflect.Zero is a good mechanism
// for obtaining such a value.
ReturnValue(position int) interface{}
}
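To make the lookup chain above concrete, here is a minimal sketch of one Handler serving one object with a single Echo method; every name (the dbusdemo package, the echo* types, the path and interface strings) is illustrative, and wiring the Handler into a Conn is out of scope here.

package dbusdemo

import "github.com/godbus/dbus"

type echoMethod struct{}

// Call receives already-decoded arguments: one string in, one string out.
func (echoMethod) Call(args ...interface{}) ([]interface{}, error) {
    return []interface{}{args[0].(string)}, nil
}
func (echoMethod) NumArguments() int             { return 1 }
func (echoMethod) NumReturns() int               { return 1 }
func (echoMethod) ArgumentValue(int) interface{} { return "" }
func (echoMethod) ReturnValue(int) interface{}   { return "" }

type echoInterface struct{}

func (echoInterface) LookupMethod(name string) (dbus.Method, bool) {
    if name == "Echo" {
        return echoMethod{}, true
    }
    return nil, false
}

type echoObject struct{}

func (echoObject) LookupInterface(name string) (dbus.Interface, bool) {
    // Treat the empty interface name as matching, as the ServerObject comment allows.
    if name == "" || name == "com.example.Echo" {
        return echoInterface{}, true
    }
    return nil, false
}

type echoHandler struct{}

func (echoHandler) LookupObject(path dbus.ObjectPath) (dbus.ServerObject, bool) {
    if path == "/com/example/Echo" {
        return echoObject{}, true
    }
    return nil, false
}

// Compile-time checks that the sketch matches the interfaces declared above.
var (
    _ dbus.Handler      = echoHandler{}
    _ dbus.ServerObject = echoObject{}
    _ dbus.Interface    = echoInterface{}
    _ dbus.Method       = echoMethod{}
)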
// An ArgumentDecoder can decode arguments using a non-standard mechanism.
//
// If a method implements this interface then the non-standard
// decoder will be used.
//
// Method arguments must be decoded from the message.
// The mechanism for doing this will vary based on the
// implementation of the method. A normal approach is provided
// as part of this library, but may be replaced with
// any other decoding scheme.
type ArgumentDecoder interface {
// To decode the arguments of a method, the sender and message are
// provided in case the implementation wants to make them accessible
// as part of the method invocation.
DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error)
}
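Continuing the illustrative dbusdemo sketch above, a method can opt out of the standard reflect-based decoding by also implementing ArgumentDecoder; the pass-through behaviour shown here is purely illustrative.

// rawMethod reuses echoMethod from the sketch above but takes over decoding.
type rawMethod struct{ echoMethod }

// DecodeArguments hands the raw message body through unchanged and appends
// the sender as an extra argument, bypassing standardMethodArgumentDecode.
func (rawMethod) DecodeArguments(conn *dbus.Conn, sender string, msg *dbus.Message, body []interface{}) ([]interface{}, error) {
    return append(append([]interface{}{}, body...), sender), nil
}

var _ dbus.ArgumentDecoder = rawMethod{}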
// A SignalHandler is responsible for delivering a signal.
//
// Signal delivery may be changed from the default channel
// based approach by Handlers implementing the SignalHandler
// interface.
type SignalHandler interface {
DeliverSignal(iface, name string, signal *Signal)
}
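Continuing the same illustrative package (with "log" added to its imports), a custom SignalHandler could replace the default channel-based delivery, for example to just log incoming signals.

type loggingSignalHandler struct{}

// DeliverSignal is invoked by the connection for every matched signal.
func (loggingSignalHandler) DeliverSignal(iface, member string, signal *dbus.Signal) {
    log.Printf("signal %s.%s from %s with %d body values",
        iface, member, signal.Sender, len(signal.Body))
}

var _ dbus.SignalHandler = loggingSignalHandler{}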
// A DBusError is used to convert a generic object to a D-Bus error.
//
// Any custom error mechanism may implement this interface to provide
// a custom encoding of the error on D-Bus. By default, if an ordinary
// error is returned, it is encoded as the generic
// "org.freedesktop.DBus.Error.Failed" error. Implementing this
// interface as well allows a custom encoding to be provided.
type DBusError interface {
DBusError() (string, []interface{})
}
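Finally, still within the illustrative sketch, a domain error can control its on-bus representation by implementing DBusError; the error name com.example.Error.NotFound is made up for this example.

type notFoundError struct{ what string }

func (e notFoundError) Error() string {
    return e.what + " not found"
}

// DBusError maps the error to a specific D-Bus error name and body instead
// of the generic org.freedesktop.DBus.Error.Failed.
func (e notFoundError) DBusError() (string, []interface{}) {
    return "com.example.Error.NotFound", []interface{}{e.what}
}

var (
    _ error          = notFoundError{}
    _ dbus.DBusError = notFoundError{}
)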


@@ -57,12 +57,12 @@ func getSignature(t reflect.Type) string {
return "n"
case reflect.Uint16:
return "q"
case reflect.Int32:
case reflect.Int, reflect.Int32:
if t == unixFDType {
return "h"
}
return "i"
case reflect.Uint32:
case reflect.Uint, reflect.Uint32:
if t == unixFDIndexType {
return "h"
}
@@ -101,6 +101,8 @@ func getSignature(t reflect.Type) string {
panic(InvalidTypeError{t})
}
return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
case reflect.Interface:
return "v"
}
panic(InvalidTypeError{t})
}
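A quick sketch of what the new cases above change in practice, assuming the existing SignatureOf helper; the comments note the signatures expected with this version of the code.

package main

import (
    "fmt"

    "github.com/godbus/dbus"
)

func main() {
    // Plain int/uint now map to the 32-bit D-Bus codes; with the old case
    // list these fell through to the InvalidTypeError panic.
    fmt.Println(dbus.SignatureOf(int(1)).String())  // "i" with this change
    fmt.Println(dbus.SignatureOf(uint(1)).String()) // "u" with this change

    // interface{} elements now map to a variant, so this yields "a{sv}".
    fmt.Println(dbus.SignatureOf(map[string]interface{}{"answer": 42}).String())
}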
@@ -162,7 +164,7 @@ func (e SignatureError) Error() string {
return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
}
// Try to read a single type from this string. If it was successfull, err is nil
// Try to read a single type from this string. If it was successful, err is nil
// and rem is the remaining unparsed part. Otherwise, err is a non-nil
// SignatureError and rem is "". depth is the current recursion depth which may
// not be greater than 64 and should be given as 0 on the first call.


@@ -4,8 +4,23 @@ import (
"encoding/binary"
"errors"
"io"
"unsafe"
)
var nativeEndian binary.ByteOrder
func detectEndianness() binary.ByteOrder {
var x uint32 = 0x01020304
if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
return binary.BigEndian
}
return binary.LittleEndian
}
func init() {
nativeEndian = detectEndianness()
}
type genericTransport struct {
io.ReadWriteCloser
}
@@ -31,5 +46,5 @@ func (t genericTransport) SendMessage(msg *Message) error {
return errors.New("dbus: unix fd passing not enabled")
}
}
return msg.EncodeTo(t, binary.LittleEndian)
return msg.EncodeTo(t, nativeEndian)
}
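For reference, a standalone sketch of the endianness probe introduced above: it checks that encoding through the detected byte order reproduces the in-memory layout of a 32-bit value.

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "unsafe"
)

// detectEndianness mirrors the probe added to the transport code above.
func detectEndianness() binary.ByteOrder {
    var x uint32 = 0x01020304
    if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
        return binary.BigEndian
    }
    return binary.LittleEndian
}

func main() {
    order := detectEndianness()

    var buf bytes.Buffer
    _ = binary.Write(&buf, order, uint32(0x01020304))

    var x uint32 = 0x01020304
    raw := (*[4]byte)(unsafe.Pointer(&x))[:]

    // Always true: the detected order matches how the host lays out integers.
    fmt.Println(order, bytes.Equal(buf.Bytes(), raw))
}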


@@ -175,7 +175,7 @@ func (t *unixTransport) SendMessage(msg *Message) error {
msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
oob := syscall.UnixRights(fds...)
buf := new(bytes.Buffer)
msg.EncodeTo(buf, binary.LittleEndian)
msg.EncodeTo(buf, nativeEndian)
n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
if err != nil {
return err
@@ -184,7 +184,7 @@ func (t *unixTransport) SendMessage(msg *Message) error {
return io.ErrShortWrite
}
} else {
if err := msg.EncodeTo(t, binary.LittleEndian); err != nil {
if err := msg.EncodeTo(t, nativeEndian); err != nil {
return err
}
}
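The fd handling above relies on the standard SCM_RIGHTS mechanism; as a rough, self-contained sketch of that pattern outside D-Bus (socket path and file are placeholders):

package main

import (
    "net"
    "os"
    "syscall"
)

// sendFD sends one open file descriptor as ancillary data next to a payload,
// the same WriteMsgUnix/UnixRights combination used by the unix transport.
func sendFD(conn *net.UnixConn, f *os.File, payload []byte) error {
    oob := syscall.UnixRights(int(f.Fd()))
    _, _, err := conn.WriteMsgUnix(payload, oob, nil)
    return err
}

func main() {
    c, err := net.DialUnix("unix", nil, &net.UnixAddr{Name: "/tmp/demo.sock", Net: "unix"})
    if err != nil {
        panic(err)
    }
    defer c.Close()

    f, err := os.Open("/etc/hostname")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    if err := sendFD(c, f, []byte("one fd attached")); err != nil {
        panic(err)
    }
}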

Some files were not shown because too many files have changed in this diff.