Switch to github.com/golang/dep for vendoring
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
This commit is contained in:
parent
d6ab91be27
commit
8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions
150
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/BUILD
generated
vendored
Normal file
150
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,150 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"container_gc.go",
|
||||
"convert.go",
|
||||
"docker.go",
|
||||
"docker_manager.go",
|
||||
"docker_manager_linux.go",
|
||||
"exec.go",
|
||||
"fake_docker_client.go",
|
||||
"fake_manager.go",
|
||||
"images.go",
|
||||
"instrumented_docker.go",
|
||||
"kube_docker_client.go",
|
||||
"labels.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/client/record:go_default_library",
|
||||
"//pkg/credentialprovider:go_default_library",
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/custommetrics:go_default_library",
|
||||
"//pkg/kubelet/events:go_default_library",
|
||||
"//pkg/kubelet/images:go_default_library",
|
||||
"//pkg/kubelet/leaky:go_default_library",
|
||||
"//pkg/kubelet/lifecycle:go_default_library",
|
||||
"//pkg/kubelet/metrics:go_default_library",
|
||||
"//pkg/kubelet/network:go_default_library",
|
||||
"//pkg/kubelet/network/hairpin:go_default_library",
|
||||
"//pkg/kubelet/prober/results:go_default_library",
|
||||
"//pkg/kubelet/qos:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/kubelet/util/cache:go_default_library",
|
||||
"//pkg/kubelet/util/format:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/securitycontext:go_default_library",
|
||||
"//pkg/util/clock:go_default_library",
|
||||
"//pkg/util/exec:go_default_library",
|
||||
"//pkg/util/flowcontrol:go_default_library",
|
||||
"//pkg/util/oom:go_default_library",
|
||||
"//pkg/util/procfs:go_default_library",
|
||||
"//pkg/util/selinux:go_default_library",
|
||||
"//pkg/util/strings:go_default_library",
|
||||
"//pkg/util/term:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//vendor:github.com/docker/distribution/digest",
|
||||
"//vendor:github.com/docker/distribution/reference",
|
||||
"//vendor:github.com/docker/docker/pkg/jsonmessage",
|
||||
"//vendor:github.com/docker/docker/pkg/stdcopy",
|
||||
"//vendor:github.com/docker/engine-api/client",
|
||||
"//vendor:github.com/docker/engine-api/types",
|
||||
"//vendor:github.com/docker/engine-api/types/container",
|
||||
"//vendor:github.com/docker/engine-api/types/strslice",
|
||||
"//vendor:github.com/docker/engine-api/types/versions",
|
||||
"//vendor:github.com/docker/go-connections/nat",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/google/cadvisor/info/v1",
|
||||
"//vendor:golang.org/x/net/context",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/runtime",
|
||||
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/errors",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"container_gc_test.go",
|
||||
"convert_test.go",
|
||||
"docker_manager_linux_test.go",
|
||||
"docker_manager_test.go",
|
||||
"docker_test.go",
|
||||
"images_test.go",
|
||||
"labels_test.go",
|
||||
],
|
||||
data = [
|
||||
"fixtures/seccomp/sub/subtest",
|
||||
"fixtures/seccomp/test",
|
||||
],
|
||||
library = ":go_default_library",
|
||||
tags = [
|
||||
"automanaged",
|
||||
],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/testapi:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/apis/componentconfig:go_default_library",
|
||||
"//pkg/client/record:go_default_library",
|
||||
"//pkg/credentialprovider:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/container/testing:go_default_library",
|
||||
"//pkg/kubelet/events:go_default_library",
|
||||
"//pkg/kubelet/images:go_default_library",
|
||||
"//pkg/kubelet/network:go_default_library",
|
||||
"//pkg/kubelet/network/mock_network:go_default_library",
|
||||
"//pkg/kubelet/network/testing:go_default_library",
|
||||
"//pkg/kubelet/prober/results:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/kubelet/util/format:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/util/clock:go_default_library",
|
||||
"//pkg/util/exec:go_default_library",
|
||||
"//pkg/util/flowcontrol:go_default_library",
|
||||
"//pkg/util/hash:go_default_library",
|
||||
"//pkg/util/intstr:go_default_library",
|
||||
"//pkg/util/strings:go_default_library",
|
||||
"//vendor:github.com/docker/docker/pkg/jsonmessage",
|
||||
"//vendor:github.com/docker/engine-api/types",
|
||||
"//vendor:github.com/docker/engine-api/types/container",
|
||||
"//vendor:github.com/docker/engine-api/types/strslice",
|
||||
"//vendor:github.com/docker/go-connections/nat",
|
||||
"//vendor:github.com/golang/mock/gomock",
|
||||
"//vendor:github.com/google/cadvisor/info/v1",
|
||||
"//vendor:github.com/stretchr/testify/assert",
|
||||
"//vendor:k8s.io/apimachinery/pkg/runtime",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
283
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/container_gc.go
generated
vendored
Normal file
283
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/container_gc.go
generated
vendored
Normal file
|
@ -0,0 +1,283 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
type containerGC struct {
|
||||
client DockerInterface
|
||||
podGetter podGetter
|
||||
containerLogsDir string
|
||||
}
|
||||
|
||||
func NewContainerGC(client DockerInterface, podGetter podGetter, containerLogsDir string) *containerGC {
|
||||
return &containerGC{
|
||||
client: client,
|
||||
podGetter: podGetter,
|
||||
containerLogsDir: containerLogsDir,
|
||||
}
|
||||
}
|
||||
|
||||
// Internal information kept for containers being considered for GC.
|
||||
type containerGCInfo struct {
|
||||
// Docker ID of the container.
|
||||
id string
|
||||
|
||||
// Docker name of the container.
|
||||
name string
|
||||
|
||||
// Creation time for the container.
|
||||
createTime time.Time
|
||||
|
||||
// Full pod name, including namespace in the format `namespace_podName`.
|
||||
// This comes from dockertools.ParseDockerName(...)
|
||||
podNameWithNamespace string
|
||||
|
||||
// Container name in pod
|
||||
containerName string
|
||||
}
|
||||
|
||||
// Containers are considered for eviction as units of (UID, container name) pair.
|
||||
type evictUnit struct {
|
||||
// UID of the pod.
|
||||
uid types.UID
|
||||
|
||||
// Name of the container in the pod.
|
||||
name string
|
||||
}
|
||||
|
||||
type containersByEvictUnit map[evictUnit][]containerGCInfo
|
||||
|
||||
// Returns the number of containers in this map.
|
||||
func (cu containersByEvictUnit) NumContainers() int {
|
||||
num := 0
|
||||
for key := range cu {
|
||||
num += len(cu[key])
|
||||
}
|
||||
|
||||
return num
|
||||
}
|
||||
|
||||
// Returns the number of pod in this map.
|
||||
func (cu containersByEvictUnit) NumEvictUnits() int {
|
||||
return len(cu)
|
||||
}
|
||||
|
||||
// Newest first.
|
||||
type byCreated []containerGCInfo
|
||||
|
||||
func (a byCreated) Len() int { return len(a) }
|
||||
func (a byCreated) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byCreated) Less(i, j int) bool { return a[i].createTime.After(a[j].createTime) }
|
||||
|
||||
func (cgc *containerGC) enforceMaxContainersPerEvictUnit(evictUnits containersByEvictUnit, MaxContainers int) {
|
||||
for uid := range evictUnits {
|
||||
toRemove := len(evictUnits[uid]) - MaxContainers
|
||||
|
||||
if toRemove > 0 {
|
||||
evictUnits[uid] = cgc.removeOldestN(evictUnits[uid], toRemove)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Removes the oldest toRemove containers and returns the resulting slice.
|
||||
func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int) []containerGCInfo {
|
||||
// Remove from oldest to newest (last to first).
|
||||
numToKeep := len(containers) - toRemove
|
||||
for i := numToKeep; i < len(containers); i++ {
|
||||
cgc.removeContainer(containers[i].id, containers[i].podNameWithNamespace, containers[i].containerName)
|
||||
}
|
||||
|
||||
// Assume we removed the containers so that we're not too aggressive.
|
||||
return containers[:numToKeep]
|
||||
}
|
||||
|
||||
// Get all containers that are evictable. Evictable containers are: not running
|
||||
// and created more than MinAge ago.
|
||||
func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByEvictUnit, []containerGCInfo, error) {
|
||||
containers, err := GetKubeletDockerContainers(cgc.client, true)
|
||||
if err != nil {
|
||||
return containersByEvictUnit{}, []containerGCInfo{}, err
|
||||
}
|
||||
|
||||
unidentifiedContainers := make([]containerGCInfo, 0)
|
||||
evictUnits := make(containersByEvictUnit)
|
||||
newestGCTime := time.Now().Add(-minAge)
|
||||
for _, container := range containers {
|
||||
// Prune out running containers.
|
||||
data, err := cgc.client.InspectContainer(container.ID)
|
||||
if err != nil {
|
||||
// Container may have been removed already, skip.
|
||||
continue
|
||||
} else if data.State.Running {
|
||||
continue
|
||||
}
|
||||
|
||||
created, err := ParseDockerTimestamp(data.Created)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to parse Created timestamp %q for container %q", data.Created, container.ID)
|
||||
}
|
||||
if newestGCTime.Before(created) {
|
||||
continue
|
||||
}
|
||||
|
||||
containerInfo := containerGCInfo{
|
||||
id: container.ID,
|
||||
name: container.Names[0],
|
||||
createTime: created,
|
||||
}
|
||||
|
||||
containerName, _, err := ParseDockerName(container.Names[0])
|
||||
|
||||
if err != nil {
|
||||
unidentifiedContainers = append(unidentifiedContainers, containerInfo)
|
||||
} else {
|
||||
key := evictUnit{
|
||||
uid: containerName.PodUID,
|
||||
name: containerName.ContainerName,
|
||||
}
|
||||
containerInfo.podNameWithNamespace = containerName.PodFullName
|
||||
containerInfo.containerName = containerName.ContainerName
|
||||
evictUnits[key] = append(evictUnits[key], containerInfo)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort the containers by age.
|
||||
for uid := range evictUnits {
|
||||
sort.Sort(byCreated(evictUnits[uid]))
|
||||
}
|
||||
|
||||
return evictUnits, unidentifiedContainers, nil
|
||||
}
|
||||
|
||||
// GarbageCollect removes dead containers using the specified container gc policy
|
||||
func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool) error {
|
||||
// Separate containers by evict units.
|
||||
evictUnits, unidentifiedContainers, err := cgc.evictableContainers(gcPolicy.MinAge)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove unidentified containers.
|
||||
for _, container := range unidentifiedContainers {
|
||||
glog.Infof("Removing unidentified dead container %q with ID %q", container.name, container.id)
|
||||
err = cgc.client.RemoveContainer(container.id, dockertypes.ContainerRemoveOptions{RemoveVolumes: true})
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to remove unidentified dead container %q: %v", container.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove deleted pod containers if all sources are ready.
|
||||
if allSourcesReady {
|
||||
for key, unit := range evictUnits {
|
||||
if cgc.isPodDeleted(key.uid) {
|
||||
cgc.removeOldestN(unit, len(unit)) // Remove all.
|
||||
delete(evictUnits, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enforce max containers per evict unit.
|
||||
if gcPolicy.MaxPerPodContainer >= 0 {
|
||||
cgc.enforceMaxContainersPerEvictUnit(evictUnits, gcPolicy.MaxPerPodContainer)
|
||||
}
|
||||
|
||||
// Enforce max total number of containers.
|
||||
if gcPolicy.MaxContainers >= 0 && evictUnits.NumContainers() > gcPolicy.MaxContainers {
|
||||
// Leave an equal number of containers per evict unit (min: 1).
|
||||
numContainersPerEvictUnit := gcPolicy.MaxContainers / evictUnits.NumEvictUnits()
|
||||
if numContainersPerEvictUnit < 1 {
|
||||
numContainersPerEvictUnit = 1
|
||||
}
|
||||
cgc.enforceMaxContainersPerEvictUnit(evictUnits, numContainersPerEvictUnit)
|
||||
|
||||
// If we still need to evict, evict oldest first.
|
||||
numContainers := evictUnits.NumContainers()
|
||||
if numContainers > gcPolicy.MaxContainers {
|
||||
flattened := make([]containerGCInfo, 0, numContainers)
|
||||
for uid := range evictUnits {
|
||||
flattened = append(flattened, evictUnits[uid]...)
|
||||
}
|
||||
sort.Sort(byCreated(flattened))
|
||||
|
||||
cgc.removeOldestN(flattened, numContainers-gcPolicy.MaxContainers)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove dead symlinks - should only happen on upgrade
|
||||
// from a k8s version without proper log symlink cleanup
|
||||
logSymlinks, _ := filepath.Glob(path.Join(cgc.containerLogsDir, fmt.Sprintf("*.%s", LogSuffix)))
|
||||
for _, logSymlink := range logSymlinks {
|
||||
if _, err = os.Stat(logSymlink); os.IsNotExist(err) {
|
||||
err = os.Remove(logSymlink)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to remove container log dead symlink %q: %v", logSymlink, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cgc *containerGC) removeContainer(id string, podNameWithNamespace string, containerName string) {
|
||||
glog.V(4).Infof("Removing container %q name %q", id, containerName)
|
||||
err := cgc.client.RemoveContainer(id, dockertypes.ContainerRemoveOptions{RemoveVolumes: true})
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to remove container %q: %v", id, err)
|
||||
}
|
||||
symlinkPath := LogSymlink(cgc.containerLogsDir, podNameWithNamespace, containerName, id)
|
||||
err = os.Remove(symlinkPath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
glog.Warningf("Failed to remove container %q log symlink %q: %v", id, symlinkPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (cgc *containerGC) deleteContainer(id string) error {
|
||||
containerInfo, err := cgc.client.InspectContainer(id)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to inspect container %q: %v", id, err)
|
||||
return err
|
||||
}
|
||||
if containerInfo.State.Running {
|
||||
return fmt.Errorf("container %q is still running", id)
|
||||
}
|
||||
|
||||
containerName, _, err := ParseDockerName(containerInfo.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cgc.removeContainer(id, containerName.PodFullName, containerName.ContainerName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cgc *containerGC) isPodDeleted(podUID types.UID) bool {
|
||||
_, found := cgc.podGetter.GetPodByUID(podUID)
|
||||
return !found
|
||||
}
|
269
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/container_gc_test.go
generated
vendored
Normal file
269
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/container_gc_test.go
generated
vendored
Normal file
|
@ -0,0 +1,269 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
func newTestContainerGC(t *testing.T) (*containerGC, *FakeDockerClient) {
|
||||
fakeDocker := new(FakeDockerClient)
|
||||
fakePodGetter := newFakePodGetter()
|
||||
gc := NewContainerGC(fakeDocker, fakePodGetter, "")
|
||||
return gc, fakeDocker
|
||||
}
|
||||
|
||||
// Makes a stable time object, lower id is earlier time.
|
||||
func makeTime(id int) time.Time {
|
||||
var zero time.Time
|
||||
return zero.Add(time.Duration(id) * time.Second)
|
||||
}
|
||||
|
||||
// Makes a container with the specified properties.
|
||||
func makeContainer(id, uid, name string, running bool, created time.Time) *FakeContainer {
|
||||
return &FakeContainer{
|
||||
Name: fmt.Sprintf("/k8s_%s_bar_new_%s_42", name, uid),
|
||||
Running: running,
|
||||
ID: id,
|
||||
CreatedAt: created,
|
||||
}
|
||||
}
|
||||
|
||||
// Makes a container with unidentified name and specified properties.
|
||||
func makeUndefinedContainer(id string, running bool, created time.Time) *FakeContainer {
|
||||
return &FakeContainer{
|
||||
Name: "/k8s_unidentified",
|
||||
Running: running,
|
||||
ID: id,
|
||||
CreatedAt: created,
|
||||
}
|
||||
}
|
||||
|
||||
func addPods(podGetter podGetter, podUIDs ...types.UID) {
|
||||
fakePodGetter := podGetter.(*fakePodGetter)
|
||||
for _, uid := range podUIDs {
|
||||
fakePodGetter.pods[uid] = &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "pod" + string(uid),
|
||||
Namespace: "test",
|
||||
UID: uid,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func verifyStringArrayEqualsAnyOrder(t *testing.T, actual, expected []string) {
|
||||
act := make([]string, len(actual))
|
||||
exp := make([]string, len(expected))
|
||||
copy(act, actual)
|
||||
copy(exp, expected)
|
||||
|
||||
sort.StringSlice(act).Sort()
|
||||
sort.StringSlice(exp).Sort()
|
||||
|
||||
if !reflect.DeepEqual(exp, act) {
|
||||
t.Errorf("Expected(sorted): %#v, Actual(sorted): %#v", exp, act)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteContainerSkipRunningContainer(t *testing.T) {
|
||||
gc, fakeDocker := newTestContainerGC(t)
|
||||
fakeDocker.SetFakeContainers([]*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", true, makeTime(0)),
|
||||
})
|
||||
addPods(gc.podGetter, "foo")
|
||||
|
||||
assert.Error(t, gc.deleteContainer("1876"))
|
||||
assert.Len(t, fakeDocker.Removed, 0)
|
||||
}
|
||||
|
||||
func TestDeleteContainerRemoveDeadContainer(t *testing.T) {
|
||||
gc, fakeDocker := newTestContainerGC(t)
|
||||
fakeDocker.SetFakeContainers([]*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", false, makeTime(0)),
|
||||
})
|
||||
addPods(gc.podGetter, "foo")
|
||||
|
||||
assert.Nil(t, gc.deleteContainer("1876"))
|
||||
assert.Len(t, fakeDocker.Removed, 1)
|
||||
}
|
||||
|
||||
func TestGarbageCollectZeroMaxContainers(t *testing.T) {
|
||||
gc, fakeDocker := newTestContainerGC(t)
|
||||
fakeDocker.SetFakeContainers([]*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", false, makeTime(0)),
|
||||
})
|
||||
addPods(gc.podGetter, "foo")
|
||||
|
||||
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0}, true))
|
||||
assert.Len(t, fakeDocker.Removed, 1)
|
||||
}
|
||||
|
||||
func TestGarbageCollectNoMaxPerPodContainerLimit(t *testing.T) {
|
||||
gc, fakeDocker := newTestContainerGC(t)
|
||||
fakeDocker.SetFakeContainers([]*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", false, makeTime(0)),
|
||||
makeContainer("2876", "foo1", "POD", false, makeTime(1)),
|
||||
makeContainer("3876", "foo2", "POD", false, makeTime(2)),
|
||||
makeContainer("4876", "foo3", "POD", false, makeTime(3)),
|
||||
makeContainer("5876", "foo4", "POD", false, makeTime(4)),
|
||||
})
|
||||
addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4")
|
||||
|
||||
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4}, true))
|
||||
assert.Len(t, fakeDocker.Removed, 1)
|
||||
}
|
||||
|
||||
func TestGarbageCollectNoMaxLimit(t *testing.T) {
|
||||
gc, fakeDocker := newTestContainerGC(t)
|
||||
fakeDocker.SetFakeContainers([]*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", false, makeTime(0)),
|
||||
makeContainer("2876", "foo1", "POD", false, makeTime(0)),
|
||||
makeContainer("3876", "foo2", "POD", false, makeTime(0)),
|
||||
makeContainer("4876", "foo3", "POD", false, makeTime(0)),
|
||||
makeContainer("5876", "foo4", "POD", false, makeTime(0)),
|
||||
})
|
||||
addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4")
|
||||
|
||||
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1}, true))
|
||||
assert.Len(t, fakeDocker.Removed, 0)
|
||||
}
|
||||
|
||||
func TestGarbageCollect(t *testing.T) {
|
||||
tests := []struct {
|
||||
containers []*FakeContainer
|
||||
expectedRemoved []string
|
||||
}{
|
||||
// Don't remove containers started recently.
|
||||
{
|
||||
containers: []*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", false, time.Now()),
|
||||
makeContainer("2876", "foo", "POD", false, time.Now()),
|
||||
makeContainer("3876", "foo", "POD", false, time.Now()),
|
||||
},
|
||||
},
|
||||
// Remove oldest containers.
|
||||
{
|
||||
containers: []*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", false, makeTime(0)),
|
||||
makeContainer("2876", "foo", "POD", false, makeTime(1)),
|
||||
makeContainer("3876", "foo", "POD", false, makeTime(2)),
|
||||
},
|
||||
expectedRemoved: []string{"1876"},
|
||||
},
|
||||
// Only remove non-running containers.
|
||||
{
|
||||
containers: []*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", true, makeTime(0)),
|
||||
makeContainer("2876", "foo", "POD", false, makeTime(1)),
|
||||
makeContainer("3876", "foo", "POD", false, makeTime(2)),
|
||||
makeContainer("4876", "foo", "POD", false, makeTime(3)),
|
||||
},
|
||||
expectedRemoved: []string{"2876"},
|
||||
},
|
||||
// Less than maxContainerCount doesn't delete any.
|
||||
{
|
||||
containers: []*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", false, makeTime(0)),
|
||||
},
|
||||
},
|
||||
// maxContainerCount applies per (UID,container) pair.
|
||||
{
|
||||
containers: []*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", false, makeTime(0)),
|
||||
makeContainer("2876", "foo", "POD", false, makeTime(1)),
|
||||
makeContainer("3876", "foo", "POD", false, makeTime(2)),
|
||||
makeContainer("1076", "foo", "bar", false, makeTime(0)),
|
||||
makeContainer("2076", "foo", "bar", false, makeTime(1)),
|
||||
makeContainer("3076", "foo", "bar", false, makeTime(2)),
|
||||
makeContainer("1176", "foo2", "POD", false, makeTime(0)),
|
||||
makeContainer("2176", "foo2", "POD", false, makeTime(1)),
|
||||
makeContainer("3176", "foo2", "POD", false, makeTime(2)),
|
||||
},
|
||||
expectedRemoved: []string{"1076", "1176", "1876"},
|
||||
},
|
||||
// Remove non-running unidentified Kubernetes containers.
|
||||
{
|
||||
containers: []*FakeContainer{
|
||||
makeUndefinedContainer("1876", true, makeTime(0)),
|
||||
makeUndefinedContainer("2876", false, makeTime(0)),
|
||||
makeContainer("3876", "foo", "POD", false, makeTime(0)),
|
||||
},
|
||||
expectedRemoved: []string{"2876"},
|
||||
},
|
||||
// Max limit applied and tries to keep from every pod.
|
||||
{
|
||||
containers: []*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", false, makeTime(0)),
|
||||
makeContainer("2876", "foo", "POD", false, makeTime(1)),
|
||||
makeContainer("3876", "foo1", "POD", false, makeTime(0)),
|
||||
makeContainer("4876", "foo1", "POD", false, makeTime(1)),
|
||||
makeContainer("5876", "foo2", "POD", false, makeTime(0)),
|
||||
makeContainer("6876", "foo2", "POD", false, makeTime(1)),
|
||||
makeContainer("7876", "foo3", "POD", false, makeTime(0)),
|
||||
makeContainer("8876", "foo3", "POD", false, makeTime(1)),
|
||||
makeContainer("9876", "foo4", "POD", false, makeTime(0)),
|
||||
makeContainer("10876", "foo4", "POD", false, makeTime(1)),
|
||||
},
|
||||
expectedRemoved: []string{"1876", "3876", "5876", "7876", "9876"},
|
||||
},
|
||||
// If more pods than limit allows, evicts oldest pod.
|
||||
{
|
||||
containers: []*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", false, makeTime(1)),
|
||||
makeContainer("2876", "foo", "POD", false, makeTime(2)),
|
||||
makeContainer("3876", "foo1", "POD", false, makeTime(1)),
|
||||
makeContainer("4876", "foo1", "POD", false, makeTime(2)),
|
||||
makeContainer("5876", "foo2", "POD", false, makeTime(0)),
|
||||
makeContainer("6876", "foo3", "POD", false, makeTime(1)),
|
||||
makeContainer("7876", "foo4", "POD", false, makeTime(0)),
|
||||
makeContainer("8876", "foo5", "POD", false, makeTime(1)),
|
||||
makeContainer("9876", "foo6", "POD", false, makeTime(2)),
|
||||
makeContainer("10876", "foo7", "POD", false, makeTime(1)),
|
||||
},
|
||||
expectedRemoved: []string{"1876", "3876", "5876", "7876"},
|
||||
},
|
||||
// Containers for deleted pods should be GC'd.
|
||||
{
|
||||
containers: []*FakeContainer{
|
||||
makeContainer("1876", "foo", "POD", false, makeTime(1)),
|
||||
makeContainer("2876", "foo", "POD", false, makeTime(2)),
|
||||
makeContainer("3876", "deleted", "POD", false, makeTime(1)),
|
||||
makeContainer("4876", "deleted", "POD", false, makeTime(2)),
|
||||
makeContainer("5876", "deleted", "POD", false, time.Now()), // Deleted pods still respect MinAge.
|
||||
},
|
||||
expectedRemoved: []string{"3876", "4876"},
|
||||
},
|
||||
}
|
||||
for i, test := range tests {
|
||||
t.Logf("Running test case with index %d", i)
|
||||
gc, fakeDocker := newTestContainerGC(t)
|
||||
fakeDocker.SetFakeContainers(test.containers)
|
||||
addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4", "foo5", "foo6", "foo7")
|
||||
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Hour, MaxPerPodContainer: 2, MaxContainers: 6}, true))
|
||||
verifyStringArrayEqualsAnyOrder(t, fakeDocker.Removed, test.expectedRemoved)
|
||||
}
|
||||
}
|
84
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/convert.go
generated
vendored
Normal file
84
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/convert.go
generated
vendored
Normal file
|
@ -0,0 +1,84 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
// This file contains helper functions to convert docker API types to runtime
|
||||
// (kubecontainer) types.
|
||||
const (
|
||||
statusRunningPrefix = "Up"
|
||||
statusExitedPrefix = "Exited"
|
||||
)
|
||||
|
||||
func mapState(state string) kubecontainer.ContainerState {
|
||||
// Parse the state string in dockertypes.Container. This could break when
|
||||
// we upgrade docker.
|
||||
switch {
|
||||
case strings.HasPrefix(state, statusRunningPrefix):
|
||||
return kubecontainer.ContainerStateRunning
|
||||
case strings.HasPrefix(state, statusExitedPrefix):
|
||||
return kubecontainer.ContainerStateExited
|
||||
default:
|
||||
return kubecontainer.ContainerStateUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// Converts dockertypes.Container to kubecontainer.Container.
|
||||
func toRuntimeContainer(c *dockertypes.Container) (*kubecontainer.Container, error) {
|
||||
if c == nil {
|
||||
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
|
||||
}
|
||||
|
||||
dockerName, hash, err := getDockerContainerNameInfo(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &kubecontainer.Container{
|
||||
ID: kubecontainer.DockerID(c.ID).ContainerID(),
|
||||
Name: dockerName.ContainerName,
|
||||
Image: c.Image,
|
||||
ImageID: c.ImageID,
|
||||
Hash: hash,
|
||||
// (random-liu) docker uses status to indicate whether a container is running or exited.
|
||||
// However, in kubernetes we usually use state to indicate whether a container is running or exited,
|
||||
// while use status to indicate the comprehensive status of the container. So we have different naming
|
||||
// norm here.
|
||||
State: mapState(c.Status),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Converts dockertypes.Image to kubecontainer.Image.
|
||||
func toRuntimeImage(image *dockertypes.Image) (*kubecontainer.Image, error) {
|
||||
if image == nil {
|
||||
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime image")
|
||||
}
|
||||
|
||||
return &kubecontainer.Image{
|
||||
ID: image.ID,
|
||||
RepoTags: image.RepoTags,
|
||||
RepoDigests: image.RepoDigests,
|
||||
Size: image.VirtualSize,
|
||||
}, nil
|
||||
}
|
90
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/convert_test.go
generated
vendored
Normal file
90
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/convert_test.go
generated
vendored
Normal file
|
@ -0,0 +1,90 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
func TestMapState(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
expected kubecontainer.ContainerState
|
||||
}{
|
||||
{input: "Up 5 hours", expected: kubecontainer.ContainerStateRunning},
|
||||
{input: "Exited (0) 2 hours ago", expected: kubecontainer.ContainerStateExited},
|
||||
{input: "Created", expected: kubecontainer.ContainerStateUnknown},
|
||||
{input: "Random string", expected: kubecontainer.ContainerStateUnknown},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
if actual := mapState(test.input); actual != test.expected {
|
||||
t.Errorf("Test[%d]: expected %q, got %q", i, test.expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestToRuntimeContainer(t *testing.T) {
|
||||
original := &dockertypes.Container{
|
||||
ID: "ab2cdf",
|
||||
Image: "bar_image",
|
||||
Names: []string{"/k8s_bar.5678_foo_ns_1234_42"},
|
||||
Status: "Up 5 hours",
|
||||
}
|
||||
expected := &kubecontainer.Container{
|
||||
ID: kubecontainer.ContainerID{Type: "docker", ID: "ab2cdf"},
|
||||
Name: "bar",
|
||||
Image: "bar_image",
|
||||
Hash: 0x5678,
|
||||
State: kubecontainer.ContainerStateRunning,
|
||||
}
|
||||
|
||||
actual, err := toRuntimeContainer(original)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(expected, actual) {
|
||||
t.Errorf("expected %#v, got %#v", expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToRuntimeImage(t *testing.T) {
|
||||
original := &dockertypes.Image{
|
||||
ID: "aeeea",
|
||||
RepoTags: []string{"abc", "def"},
|
||||
RepoDigests: []string{"123", "456"},
|
||||
VirtualSize: 1234,
|
||||
}
|
||||
expected := &kubecontainer.Image{
|
||||
ID: "aeeea",
|
||||
RepoTags: []string{"abc", "def"},
|
||||
RepoDigests: []string{"123", "456"},
|
||||
Size: 1234,
|
||||
}
|
||||
|
||||
actual, err := toRuntimeImage(original)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(expected, actual) {
|
||||
t.Errorf("expected %#v, got %#v", expected, actual)
|
||||
}
|
||||
}
|
410
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker.go
generated
vendored
Normal file
410
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker.go
generated
vendored
Normal file
|
@ -0,0 +1,410 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
dockerdigest "github.com/docker/distribution/digest"
|
||||
dockerref "github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
dockerapi "github.com/docker/engine-api/client"
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/credentialprovider"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/images"
|
||||
"k8s.io/kubernetes/pkg/kubelet/leaky"
|
||||
)
|
||||
|
||||
const (
|
||||
PodInfraContainerName = leaky.PodInfraContainerName
|
||||
DockerPrefix = "docker://"
|
||||
DockerPullablePrefix = "docker-pullable://"
|
||||
LogSuffix = "log"
|
||||
ext4MaxFileNameLen = 255
|
||||
)
|
||||
|
||||
// DockerInterface is an abstract interface for testability. It abstracts the interface of docker client.
|
||||
type DockerInterface interface {
|
||||
ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error)
|
||||
InspectContainer(id string) (*dockertypes.ContainerJSON, error)
|
||||
CreateContainer(dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error)
|
||||
StartContainer(id string) error
|
||||
StopContainer(id string, timeout int) error
|
||||
RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error
|
||||
InspectImageByRef(imageRef string) (*dockertypes.ImageInspect, error)
|
||||
InspectImageByID(imageID string) (*dockertypes.ImageInspect, error)
|
||||
ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error)
|
||||
PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error
|
||||
RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error)
|
||||
ImageHistory(id string) ([]dockertypes.ImageHistory, error)
|
||||
Logs(string, dockertypes.ContainerLogsOptions, StreamOptions) error
|
||||
Version() (*dockertypes.Version, error)
|
||||
Info() (*dockertypes.Info, error)
|
||||
CreateExec(string, dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error)
|
||||
StartExec(string, dockertypes.ExecStartCheck, StreamOptions) error
|
||||
InspectExec(id string) (*dockertypes.ContainerExecInspect, error)
|
||||
AttachToContainer(string, dockertypes.ContainerAttachOptions, StreamOptions) error
|
||||
ResizeContainerTTY(id string, height, width int) error
|
||||
ResizeExecTTY(id string, height, width int) error
|
||||
}
|
||||
|
||||
// KubeletContainerName encapsulates a pod name and a Kubernetes container name.
|
||||
type KubeletContainerName struct {
|
||||
PodFullName string
|
||||
PodUID types.UID
|
||||
ContainerName string
|
||||
}
|
||||
|
||||
// containerNamePrefix is used to identify the containers on the node managed by this
|
||||
// process.
|
||||
var containerNamePrefix = "k8s"
|
||||
|
||||
// SetContainerNamePrefix allows the container prefix name for this process to be changed.
|
||||
// This is intended to support testing and bootstrapping experimentation. It cannot be
|
||||
// changed once the Kubelet starts.
|
||||
func SetContainerNamePrefix(prefix string) {
|
||||
containerNamePrefix = prefix
|
||||
}
|
||||
|
||||
// DockerPuller is an abstract interface for testability. It abstracts image pull operations.
|
||||
type DockerPuller interface {
|
||||
Pull(image string, secrets []v1.Secret) error
|
||||
GetImageRef(image string) (string, error)
|
||||
}
|
||||
|
||||
// dockerPuller is the default implementation of DockerPuller.
|
||||
type dockerPuller struct {
|
||||
client DockerInterface
|
||||
keyring credentialprovider.DockerKeyring
|
||||
}
|
||||
|
||||
// newDockerPuller creates a new instance of the default implementation of DockerPuller.
|
||||
func newDockerPuller(client DockerInterface) DockerPuller {
|
||||
return &dockerPuller{
|
||||
client: client,
|
||||
keyring: credentialprovider.NewDockerKeyring(),
|
||||
}
|
||||
}
|
||||
|
||||
func filterHTTPError(err error, image string) error {
|
||||
// docker/docker/pull/11314 prints detailed error info for docker pull.
|
||||
// When it hits 502, it returns a verbose html output including an inline svg,
|
||||
// which makes the output of kubectl get pods much harder to parse.
|
||||
// Here converts such verbose output to a concise one.
|
||||
jerr, ok := err.(*jsonmessage.JSONError)
|
||||
if ok && (jerr.Code == http.StatusBadGateway ||
|
||||
jerr.Code == http.StatusServiceUnavailable ||
|
||||
jerr.Code == http.StatusGatewayTimeout) {
|
||||
glog.V(2).Infof("Pulling image %q failed: %v", image, err)
|
||||
return images.RegistryUnavailable
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// matchImageTagOrSHA checks if the given image specifier is a valid image ref,
|
||||
// and that it matches the given image. It should fail on things like image IDs
|
||||
// (config digests) and other digest-only references, but succeed on image names
|
||||
// (`foo`), tag references (`foo:bar`), and manifest digest references
|
||||
// (`foo@sha256:xyz`).
|
||||
func matchImageTagOrSHA(inspected dockertypes.ImageInspect, image string) bool {
|
||||
// The image string follows the grammar specified here
|
||||
// https://github.com/docker/distribution/blob/master/reference/reference.go#L4
|
||||
named, err := dockerref.ParseNamed(image)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("couldn't parse image reference %q: %v", image, err)
|
||||
return false
|
||||
}
|
||||
_, isTagged := named.(dockerref.Tagged)
|
||||
digest, isDigested := named.(dockerref.Digested)
|
||||
if !isTagged && !isDigested {
|
||||
// No Tag or SHA specified, so just return what we have
|
||||
return true
|
||||
}
|
||||
|
||||
if isTagged {
|
||||
// Check the RepoTags for a match.
|
||||
for _, tag := range inspected.RepoTags {
|
||||
// An image name (without the tag/digest) can be [hostname '/'] component ['/' component]*
|
||||
// Because either the RepoTag or the name *may* contain the
|
||||
// hostname or not, we only check for the suffix match.
|
||||
if strings.HasSuffix(image, tag) || strings.HasSuffix(tag, image) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if isDigested {
|
||||
for _, repoDigest := range inspected.RepoDigests {
|
||||
named, err := dockerref.ParseNamed(repoDigest)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("couldn't parse image RepoDigest reference %q: %v", repoDigest, err)
|
||||
continue
|
||||
}
|
||||
if d, isDigested := named.(dockerref.Digested); isDigested {
|
||||
if digest.Digest().Algorithm().String() == d.Digest().Algorithm().String() &&
|
||||
digest.Digest().Hex() == d.Digest().Hex() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// process the ID as a digest
|
||||
id, err := dockerdigest.ParseDigest(inspected.ID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err)
|
||||
return false
|
||||
}
|
||||
if digest.Digest().Algorithm().String() == id.Algorithm().String() && digest.Digest().Hex() == id.Hex() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
glog.V(4).Infof("Inspected image (%q) does not match %s", inspected.ID, image)
|
||||
return false
|
||||
}
|
||||
|
||||
// matchImageIDOnly checks that the given image specifier is a digest-only
|
||||
// reference, and that it matches the given image.
|
||||
func matchImageIDOnly(inspected dockertypes.ImageInspect, image string) bool {
|
||||
// If the image ref is literally equal to the inspected image's ID,
|
||||
// just return true here (this might be the case for Docker 1.9,
|
||||
// where we won't have a digest for the ID)
|
||||
if inspected.ID == image {
|
||||
return true
|
||||
}
|
||||
|
||||
// Otherwise, we should try actual parsing to be more correct
|
||||
ref, err := dockerref.Parse(image)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("couldn't parse image reference %q: %v", image, err)
|
||||
return false
|
||||
}
|
||||
|
||||
digest, isDigested := ref.(dockerref.Digested)
|
||||
if !isDigested {
|
||||
glog.V(4).Infof("the image reference %q was not a digest reference")
|
||||
return false
|
||||
}
|
||||
|
||||
id, err := dockerdigest.ParseDigest(inspected.ID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err)
|
||||
return false
|
||||
}
|
||||
|
||||
if digest.Digest().Algorithm().String() == id.Algorithm().String() && digest.Digest().Hex() == id.Hex() {
|
||||
return true
|
||||
}
|
||||
|
||||
glog.V(4).Infof("The reference %s does not directly refer to the given image's ID (%q)", image, inspected.ID)
|
||||
return false
|
||||
}
|
||||
|
||||
func (p dockerPuller) Pull(image string, secrets []v1.Secret) error {
|
||||
keyring, err := credentialprovider.MakeDockerKeyring(secrets, p.keyring)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// The only used image pull option RegistryAuth will be set in kube_docker_client
|
||||
opts := dockertypes.ImagePullOptions{}
|
||||
|
||||
creds, haveCredentials := keyring.Lookup(image)
|
||||
if !haveCredentials {
|
||||
glog.V(1).Infof("Pulling image %s without credentials", image)
|
||||
|
||||
err := p.client.PullImage(image, dockertypes.AuthConfig{}, opts)
|
||||
if err == nil {
|
||||
// Sometimes PullImage failed with no error returned.
|
||||
imageRef, ierr := p.GetImageRef(image)
|
||||
if ierr != nil {
|
||||
glog.Warningf("Failed to inspect image %s: %v", image, ierr)
|
||||
}
|
||||
if imageRef == "" {
|
||||
return fmt.Errorf("image pull failed for unknown error")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Image spec: [<registry>/]<repository>/<image>[:<version] so we count '/'
|
||||
explicitRegistry := (strings.Count(image, "/") == 2)
|
||||
// Hack, look for a private registry, and decorate the error with the lack of
|
||||
// credentials. This is heuristic, and really probably could be done better
|
||||
// by talking to the registry API directly from the kubelet here.
|
||||
if explicitRegistry {
|
||||
return fmt.Errorf("image pull failed for %s, this may be because there are no credentials on this request. details: (%v)", image, err)
|
||||
}
|
||||
|
||||
return filterHTTPError(err, image)
|
||||
}
|
||||
|
||||
var pullErrs []error
|
||||
for _, currentCreds := range creds {
|
||||
err = p.client.PullImage(image, credentialprovider.LazyProvide(currentCreds), opts)
|
||||
// If there was no error, return success
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
pullErrs = append(pullErrs, filterHTTPError(err, image))
|
||||
}
|
||||
|
||||
return utilerrors.NewAggregate(pullErrs)
|
||||
}
|
||||
|
||||
func (p dockerPuller) GetImageRef(image string) (string, error) {
|
||||
resp, err := p.client.InspectImageByRef(image)
|
||||
if err == nil {
|
||||
if resp == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
imageRef := resp.ID
|
||||
if len(resp.RepoDigests) > 0 {
|
||||
imageRef = resp.RepoDigests[0]
|
||||
}
|
||||
return imageRef, nil
|
||||
}
|
||||
if _, ok := err.(imageNotFoundError); ok {
|
||||
return "", nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Creates a name which can be reversed to identify both full pod name and container name.
|
||||
// This function returns stable name, unique name and a unique id.
|
||||
// Although rand.Uint32() is not really unique, but it's enough for us because error will
|
||||
// only occur when instances of the same container in the same pod have the same UID. The
|
||||
// chance is really slim.
|
||||
func BuildDockerName(dockerName KubeletContainerName, container *v1.Container) (string, string, string) {
|
||||
containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
|
||||
stableName := fmt.Sprintf("%s_%s_%s_%s",
|
||||
containerNamePrefix,
|
||||
containerName,
|
||||
dockerName.PodFullName,
|
||||
dockerName.PodUID)
|
||||
UID := fmt.Sprintf("%08x", rand.Uint32())
|
||||
return stableName, fmt.Sprintf("%s_%s", stableName, UID), UID
|
||||
}
|
||||
|
||||
// Unpacks a container name, returning the pod full name and container name we would have used to
|
||||
// construct the docker name. If we are unable to parse the name, an error is returned.
|
||||
func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
|
||||
// For some reason docker appears to be appending '/' to names.
|
||||
// If it's there, strip it.
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
parts := strings.Split(name, "_")
|
||||
if len(parts) == 0 || parts[0] != containerNamePrefix {
|
||||
err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
|
||||
return nil, 0, err
|
||||
}
|
||||
if len(parts) < 6 {
|
||||
// We have at least 5 fields. We may have more in the future.
|
||||
// Anything with less fields than this is not something we can
|
||||
// manage.
|
||||
glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
|
||||
err = fmt.Errorf("Docker container name %q has less parts than expected %v", name, parts)
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
nameParts := strings.Split(parts[1], ".")
|
||||
containerName := nameParts[0]
|
||||
if len(nameParts) > 1 {
|
||||
hash, err = strconv.ParseUint(nameParts[1], 16, 32)
|
||||
if err != nil {
|
||||
glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
|
||||
}
|
||||
}
|
||||
|
||||
podFullName := parts[2] + "_" + parts[3]
|
||||
podUID := types.UID(parts[4])
|
||||
|
||||
return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
|
||||
}
|
||||
|
||||
func LogSymlink(containerLogsDir, podFullName, containerName, dockerId string) string {
|
||||
suffix := fmt.Sprintf(".%s", LogSuffix)
|
||||
logPath := fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerId)
|
||||
// Length of a filename cannot exceed 255 characters in ext4 on Linux.
|
||||
if len(logPath) > ext4MaxFileNameLen-len(suffix) {
|
||||
logPath = logPath[:ext4MaxFileNameLen-len(suffix)]
|
||||
}
|
||||
return path.Join(containerLogsDir, logPath+suffix)
|
||||
}
|
||||
|
||||
// Get a *dockerapi.Client, either using the endpoint passed in, or using
|
||||
// DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT path per their spec
|
||||
func getDockerClient(dockerEndpoint string) (*dockerapi.Client, error) {
|
||||
if len(dockerEndpoint) > 0 {
|
||||
glog.Infof("Connecting to docker on %s", dockerEndpoint)
|
||||
return dockerapi.NewClient(dockerEndpoint, "", nil, nil)
|
||||
}
|
||||
return dockerapi.NewEnvClient()
|
||||
}
|
||||
|
||||
// ConnectToDockerOrDie creates docker client connecting to docker daemon.
|
||||
// If the endpoint passed in is "fake://", a fake docker client
|
||||
// will be returned. The program exits if error occurs. The requestTimeout
|
||||
// is the timeout for docker requests. If timeout is exceeded, the request
|
||||
// will be cancelled and throw out an error. If requestTimeout is 0, a default
|
||||
// value will be applied.
|
||||
func ConnectToDockerOrDie(dockerEndpoint string, requestTimeout, imagePullProgressDeadline time.Duration) DockerInterface {
|
||||
if dockerEndpoint == "fake://" {
|
||||
return NewFakeDockerClient()
|
||||
}
|
||||
client, err := getDockerClient(dockerEndpoint)
|
||||
if err != nil {
|
||||
glog.Fatalf("Couldn't connect to docker: %v", err)
|
||||
}
|
||||
glog.Infof("Start docker client with request timeout=%v", requestTimeout)
|
||||
return newKubeDockerClient(client, requestTimeout, imagePullProgressDeadline)
|
||||
}
|
||||
|
||||
// GetKubeletDockerContainers lists all container or just the running ones.
|
||||
// Returns a list of docker containers that we manage
|
||||
func GetKubeletDockerContainers(client DockerInterface, allContainers bool) ([]*dockertypes.Container, error) {
|
||||
result := []*dockertypes.Container{}
|
||||
containers, err := client.ListContainers(dockertypes.ContainerListOptions{All: allContainers})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := range containers {
|
||||
container := &containers[i]
|
||||
if len(container.Names) == 0 {
|
||||
continue
|
||||
}
|
||||
// Skip containers that we didn't create to allow users to manually
|
||||
// spin up their own containers if they want.
|
||||
if !strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"_") {
|
||||
glog.V(5).Infof("Docker Container: %s is not managed by kubelet.", container.Names[0])
|
||||
continue
|
||||
}
|
||||
result = append(result, container)
|
||||
}
|
||||
return result, nil
|
||||
}
|
2692
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager.go
generated
vendored
Normal file
2692
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
75
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager_linux.go
generated
vendored
Normal file
75
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
dockercontainer "github.com/docker/engine-api/types/container"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
// These two functions are OS specific (for now at least)
|
||||
func updateHostConfig(hc *dockercontainer.HostConfig, opts *kubecontainer.RunContainerOptions) {
|
||||
// no-op, there is a windows implementation that is different.
|
||||
}
|
||||
|
||||
func DefaultMemorySwap() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func getContainerIP(container *dockertypes.ContainerJSON) string {
|
||||
result := ""
|
||||
if container.NetworkSettings != nil {
|
||||
result = container.NetworkSettings.IPAddress
|
||||
|
||||
// Fall back to IPv6 address if no IPv4 address is present
|
||||
if result == "" {
|
||||
result = container.NetworkSettings.GlobalIPv6Address
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// We don't want to override the networking mode on Linux.
|
||||
func getNetworkingMode() string { return "" }
|
||||
|
||||
// Returns true if the container name matches the infrastructure's container name
|
||||
func containerProvidesPodIP(name *KubeletContainerName) bool {
|
||||
return name.ContainerName == PodInfraContainerName
|
||||
}
|
||||
|
||||
// Returns Seccomp and AppArmor Security options
|
||||
func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) {
|
||||
var securityOpts []dockerOpt
|
||||
if seccompOpts, err := dm.getSeccompOpts(pod, ctrName); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
securityOpts = append(securityOpts, seccompOpts...)
|
||||
}
|
||||
|
||||
if appArmorOpts, err := dm.getAppArmorOpts(pod, ctrName); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
securityOpts = append(securityOpts, appArmorOpts...)
|
||||
}
|
||||
|
||||
return securityOpts, nil
|
||||
}
|
507
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager_linux_test.go
generated
vendored
Normal file
507
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager_linux_test.go
generated
vendored
Normal file
|
@ -0,0 +1,507 @@
|
|||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"path"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/events"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network/mock_network"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
utilstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
)
|
||||
|
||||
func TestGetSecurityOpts(t *testing.T) {
|
||||
const containerName = "bar"
|
||||
pod := func(annotations map[string]string) *v1.Pod {
|
||||
p := makePod("foo", &v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: containerName},
|
||||
},
|
||||
})
|
||||
p.Annotations = annotations
|
||||
return p
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
msg string
|
||||
pod *v1.Pod
|
||||
expectedOpts []string
|
||||
}{{
|
||||
msg: "No security annotations",
|
||||
pod: pod(nil),
|
||||
expectedOpts: []string{"seccomp=unconfined"},
|
||||
}, {
|
||||
msg: "Seccomp default",
|
||||
pod: pod(map[string]string{
|
||||
v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
|
||||
}),
|
||||
expectedOpts: nil,
|
||||
}, {
|
||||
msg: "AppArmor runtime/default",
|
||||
pod: pod(map[string]string{
|
||||
apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileRuntimeDefault,
|
||||
}),
|
||||
expectedOpts: []string{"seccomp=unconfined"},
|
||||
}, {
|
||||
msg: "AppArmor local profile",
|
||||
pod: pod(map[string]string{
|
||||
apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo",
|
||||
}),
|
||||
expectedOpts: []string{"seccomp=unconfined", "apparmor=foo"},
|
||||
}, {
|
||||
msg: "AppArmor and seccomp profile",
|
||||
pod: pod(map[string]string{
|
||||
v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
|
||||
apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo",
|
||||
}),
|
||||
expectedOpts: []string{"apparmor=foo"},
|
||||
}}
|
||||
|
||||
dm, _ := newTestDockerManagerWithVersion("1.11.1", "1.23")
|
||||
for i, test := range tests {
|
||||
securityOpts, err := dm.getSecurityOpts(test.pod, containerName)
|
||||
assert.NoError(t, err, "TestCase[%d]: %s", i, test.msg)
|
||||
opts, err := dm.fmtDockerOpts(securityOpts)
|
||||
assert.NoError(t, err, "TestCase[%d]: %s", i, test.msg)
|
||||
assert.Len(t, opts, len(test.expectedOpts), "TestCase[%d]: %s", i, test.msg)
|
||||
for _, opt := range test.expectedOpts {
|
||||
assert.Contains(t, opts, opt, "TestCase[%d]: %s", i, test.msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeccompIsUnconfinedByDefaultWithDockerV110(t *testing.T) {
|
||||
dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
|
||||
// We want to capture events.
|
||||
recorder := record.NewFakeRecorder(20)
|
||||
dm.recorder = recorder
|
||||
|
||||
pod := makePod("foo", &v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "bar"},
|
||||
},
|
||||
})
|
||||
|
||||
runSyncPod(t, dm, fakeDocker, pod, nil, false)
|
||||
|
||||
verifyCalls(t, fakeDocker, []string{
|
||||
// Create pod infra container.
|
||||
"create", "start", "inspect_container", "inspect_container",
|
||||
// Create container.
|
||||
"create", "start", "inspect_container",
|
||||
})
|
||||
|
||||
fakeDocker.Lock()
|
||||
if len(fakeDocker.Created) != 2 ||
|
||||
!matchString(t, "/k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) ||
|
||||
!matchString(t, "/k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) {
|
||||
t.Errorf("unexpected containers created %v", fakeDocker.Created)
|
||||
}
|
||||
fakeDocker.Unlock()
|
||||
|
||||
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
assert.Contains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods with Docker versions >= 1.10 must have seccomp disabled (seccomp:unconfined) by default")
|
||||
|
||||
cid := utilstrings.ShortenString(fakeDocker.Created[1], 12)
|
||||
assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer,
|
||||
fmt.Sprintf("Created container with docker id %s; Security:[seccomp=unconfined]", cid)))
|
||||
}
|
||||
|
||||
func TestUnconfinedSeccompProfileWithDockerV110(t *testing.T) {
|
||||
dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
|
||||
pod := makePod("foo4", &v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "bar4"},
|
||||
},
|
||||
})
|
||||
pod.Annotations = map[string]string{
|
||||
v1.SeccompPodAnnotationKey: "unconfined",
|
||||
}
|
||||
|
||||
runSyncPod(t, dm, fakeDocker, pod, nil, false)
|
||||
|
||||
verifyCalls(t, fakeDocker, []string{
|
||||
// Create pod infra container.
|
||||
"create", "start", "inspect_container", "inspect_container",
|
||||
// Create container.
|
||||
"create", "start", "inspect_container",
|
||||
})
|
||||
|
||||
fakeDocker.Lock()
|
||||
if len(fakeDocker.Created) != 2 ||
|
||||
!matchString(t, "/k8s_POD\\.[a-f0-9]+_foo4_new_", fakeDocker.Created[0]) ||
|
||||
!matchString(t, "/k8s_bar4\\.[a-f0-9]+_foo4_new_", fakeDocker.Created[1]) {
|
||||
t.Errorf("unexpected containers created %v", fakeDocker.Created)
|
||||
}
|
||||
fakeDocker.Unlock()
|
||||
|
||||
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
assert.Contains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods created with a seccomp annotation of unconfined should have seccomp:unconfined.")
|
||||
}
|
||||
|
||||
func TestDefaultSeccompProfileWithDockerV110(t *testing.T) {
|
||||
dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
|
||||
pod := makePod("foo1", &v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "bar1"},
|
||||
},
|
||||
})
|
||||
pod.Annotations = map[string]string{
|
||||
v1.SeccompPodAnnotationKey: "docker/default",
|
||||
}
|
||||
|
||||
runSyncPod(t, dm, fakeDocker, pod, nil, false)
|
||||
|
||||
verifyCalls(t, fakeDocker, []string{
|
||||
// Create pod infra container.
|
||||
"create", "start", "inspect_container", "inspect_container",
|
||||
// Create container.
|
||||
"create", "start", "inspect_container",
|
||||
})
|
||||
|
||||
fakeDocker.Lock()
|
||||
if len(fakeDocker.Created) != 2 ||
|
||||
!matchString(t, "/k8s_POD\\.[a-f0-9]+_foo1_new_", fakeDocker.Created[0]) ||
|
||||
!matchString(t, "/k8s_bar1\\.[a-f0-9]+_foo1_new_", fakeDocker.Created[1]) {
|
||||
t.Errorf("unexpected containers created %v", fakeDocker.Created)
|
||||
}
|
||||
fakeDocker.Unlock()
|
||||
|
||||
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
assert.NotContains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods created with a seccomp annotation of docker/default should have empty security opt.")
|
||||
}
|
||||
|
||||
func TestSeccompContainerAnnotationTrumpsPod(t *testing.T) {
|
||||
dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
|
||||
pod := makePod("foo2", &v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "bar2"},
|
||||
},
|
||||
})
|
||||
pod.Annotations = map[string]string{
|
||||
v1.SeccompPodAnnotationKey: "unconfined",
|
||||
v1.SeccompContainerAnnotationKeyPrefix + "bar2": "docker/default",
|
||||
}
|
||||
|
||||
runSyncPod(t, dm, fakeDocker, pod, nil, false)
|
||||
|
||||
verifyCalls(t, fakeDocker, []string{
|
||||
// Create pod infra container.
|
||||
"create", "start", "inspect_container", "inspect_container",
|
||||
// Create container.
|
||||
"create", "start", "inspect_container",
|
||||
})
|
||||
|
||||
fakeDocker.Lock()
|
||||
if len(fakeDocker.Created) != 2 ||
|
||||
!matchString(t, "/k8s_POD\\.[a-f0-9]+_foo2_new_", fakeDocker.Created[0]) ||
|
||||
!matchString(t, "/k8s_bar2\\.[a-f0-9]+_foo2_new_", fakeDocker.Created[1]) {
|
||||
t.Errorf("unexpected containers created %v", fakeDocker.Created)
|
||||
}
|
||||
fakeDocker.Unlock()
|
||||
|
||||
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
assert.NotContains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Container annotation should trump the pod annotation for seccomp.")
|
||||
}
|
||||
|
||||
func TestSecurityOptsAreNilWithDockerV19(t *testing.T) {
|
||||
dm, fakeDocker := newTestDockerManagerWithVersion("1.9.1", "1.21")
|
||||
pod := makePod("foo", &v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "bar"},
|
||||
},
|
||||
})
|
||||
|
||||
runSyncPod(t, dm, fakeDocker, pod, nil, false)
|
||||
|
||||
verifyCalls(t, fakeDocker, []string{
|
||||
// Create pod infra container.
|
||||
"create", "start", "inspect_container", "inspect_container",
|
||||
// Create container.
|
||||
"create", "start", "inspect_container",
|
||||
})
|
||||
|
||||
fakeDocker.Lock()
|
||||
if len(fakeDocker.Created) != 2 ||
|
||||
!matchString(t, "/k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) ||
|
||||
!matchString(t, "/k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) {
|
||||
t.Errorf("unexpected containers created %v", fakeDocker.Created)
|
||||
}
|
||||
fakeDocker.Unlock()
|
||||
|
||||
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
assert.NotContains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods with Docker versions < 1.10 must not have seccomp disabled by default")
|
||||
}
|
||||
|
||||
func TestCreateAppArmorContainer(t *testing.T) {
|
||||
dm, fakeDocker := newTestDockerManagerWithVersion("1.11.1", "1.23")
|
||||
// We want to capture events.
|
||||
recorder := record.NewFakeRecorder(20)
|
||||
dm.recorder = recorder
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
Annotations: map[string]string{
|
||||
apparmor.ContainerAnnotationKeyPrefix + "test": apparmor.ProfileNamePrefix + "test-profile",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "test"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
runSyncPod(t, dm, fakeDocker, pod, nil, false)
|
||||
|
||||
verifyCalls(t, fakeDocker, []string{
|
||||
// Create pod infra container.
|
||||
"create", "start", "inspect_container", "inspect_container",
|
||||
// Create container.
|
||||
"create", "start", "inspect_container",
|
||||
})
|
||||
|
||||
fakeDocker.Lock()
|
||||
if len(fakeDocker.Created) != 2 ||
|
||||
!matchString(t, "/k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) ||
|
||||
!matchString(t, "/k8s_test\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) {
|
||||
t.Errorf("unexpected containers created %v", fakeDocker.Created)
|
||||
}
|
||||
fakeDocker.Unlock()
|
||||
|
||||
// Verify security opts.
|
||||
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
securityOpts := newContainer.HostConfig.SecurityOpt
|
||||
assert.Contains(t, securityOpts, "apparmor=test-profile", "Container should have apparmor security opt")
|
||||
|
||||
cid := utilstrings.ShortenString(fakeDocker.Created[1], 12)
|
||||
assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer,
|
||||
fmt.Sprintf("Created container with docker id %s; Security:[seccomp=unconfined apparmor=test-profile]", cid)))
|
||||
}
|
||||
|
||||
func TestSeccompLocalhostProfileIsLoaded(t *testing.T) {
|
||||
tests := []struct {
|
||||
annotations map[string]string
|
||||
expectedSecOpt string
|
||||
expectedSecMsg string
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
annotations: map[string]string{
|
||||
v1.SeccompPodAnnotationKey: "localhost/test",
|
||||
},
|
||||
expectedSecOpt: `seccomp={"foo":"bar"}`,
|
||||
expectedSecMsg: "seccomp=test(md5:21aeae45053385adebd25311f9dd9cb1)",
|
||||
},
|
||||
{
|
||||
annotations: map[string]string{
|
||||
v1.SeccompPodAnnotationKey: "localhost/sub/subtest",
|
||||
},
|
||||
expectedSecOpt: `seccomp={"abc":"def"}`,
|
||||
expectedSecMsg: "seccomp=sub/subtest(md5:07c9bcb4db631f7ca191d6e0bca49f76)",
|
||||
},
|
||||
{
|
||||
annotations: map[string]string{
|
||||
v1.SeccompPodAnnotationKey: "localhost/not-existing",
|
||||
},
|
||||
expectedError: "cannot load seccomp profile",
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
dm, fakeDocker := newTestDockerManagerWithVersion("1.11.0", "1.23")
|
||||
// We want to capture events.
|
||||
recorder := record.NewFakeRecorder(20)
|
||||
dm.recorder = recorder
|
||||
|
||||
dm.seccompProfileRoot = path.Join("fixtures", "seccomp")
|
||||
|
||||
pod := makePod("foo2", &v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "bar2"},
|
||||
},
|
||||
})
|
||||
pod.Annotations = test.annotations
|
||||
|
||||
result := runSyncPod(t, dm, fakeDocker, pod, nil, test.expectedError != "")
|
||||
if test.expectedError != "" {
|
||||
assert.Contains(t, result.Error().Error(), test.expectedError)
|
||||
continue
|
||||
}
|
||||
|
||||
verifyCalls(t, fakeDocker, []string{
|
||||
// Create pod infra container.
|
||||
"create", "start", "inspect_container", "inspect_container",
|
||||
// Create container.
|
||||
"create", "start", "inspect_container",
|
||||
})
|
||||
|
||||
fakeDocker.Lock()
|
||||
if len(fakeDocker.Created) != 2 ||
|
||||
!matchString(t, "/k8s_POD\\.[a-f0-9]+_foo2_new_", fakeDocker.Created[0]) ||
|
||||
!matchString(t, "/k8s_bar2\\.[a-f0-9]+_foo2_new_", fakeDocker.Created[1]) {
|
||||
t.Errorf("unexpected containers created %v", fakeDocker.Created)
|
||||
}
|
||||
fakeDocker.Unlock()
|
||||
|
||||
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
assert.Contains(t, newContainer.HostConfig.SecurityOpt, test.expectedSecOpt, "The compacted seccomp json profile should be loaded.")
|
||||
|
||||
cid := utilstrings.ShortenString(fakeDocker.Created[1], 12)
|
||||
assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer,
|
||||
fmt.Sprintf("Created container with docker id %s; Security:[%s]", cid, test.expectedSecMsg)),
|
||||
"testcase %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPodStatusFromNetworkPlugin(t *testing.T) {
|
||||
cases := []struct {
|
||||
pod *v1.Pod
|
||||
fakePodIP string
|
||||
containerID string
|
||||
infraContainerID string
|
||||
networkStatusError error
|
||||
expectRunning bool
|
||||
expectUnknown bool
|
||||
}{
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container"}},
|
||||
},
|
||||
},
|
||||
fakePodIP: "10.10.10.10",
|
||||
containerID: "123",
|
||||
infraContainerID: "9876",
|
||||
networkStatusError: nil,
|
||||
expectRunning: true,
|
||||
expectUnknown: false,
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container"}},
|
||||
},
|
||||
},
|
||||
fakePodIP: "",
|
||||
containerID: "123",
|
||||
infraContainerID: "9876",
|
||||
networkStatusError: fmt.Errorf("CNI plugin error"),
|
||||
expectRunning: false,
|
||||
expectUnknown: true,
|
||||
},
|
||||
}
|
||||
for _, test := range cases {
|
||||
dm, fakeDocker := newTestDockerManager()
|
||||
ctrl := gomock.NewController(t)
|
||||
fnp := mock_network.NewMockNetworkPlugin(ctrl)
|
||||
dm.networkPlugin = fnp
|
||||
|
||||
fakeDocker.SetFakeRunningContainers([]*FakeContainer{
|
||||
{
|
||||
ID: test.containerID,
|
||||
Name: fmt.Sprintf("/k8s_container_%s_%s_%s_42", test.pod.Name, test.pod.Namespace, test.pod.UID),
|
||||
Running: true,
|
||||
},
|
||||
{
|
||||
ID: test.infraContainerID,
|
||||
Name: fmt.Sprintf("/k8s_POD.%s_%s_%s_%s_42", strconv.FormatUint(generatePodInfraContainerHash(test.pod), 16), test.pod.Name, test.pod.Namespace, test.pod.UID),
|
||||
Running: true,
|
||||
},
|
||||
})
|
||||
|
||||
fnp.EXPECT().Name().Return("someNetworkPlugin").AnyTimes()
|
||||
var podNetworkStatus *network.PodNetworkStatus
|
||||
if test.fakePodIP != "" {
|
||||
podNetworkStatus = &network.PodNetworkStatus{IP: net.ParseIP(test.fakePodIP)}
|
||||
}
|
||||
fnp.EXPECT().GetPodNetworkStatus(test.pod.Namespace, test.pod.Name, kubecontainer.DockerID(test.infraContainerID).ContainerID()).Return(podNetworkStatus, test.networkStatusError)
|
||||
|
||||
podStatus, err := dm.GetPodStatus(test.pod.UID, test.pod.Name, test.pod.Namespace)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if podStatus.IP != test.fakePodIP {
|
||||
t.Errorf("Got wrong ip, expected %v, got %v", test.fakePodIP, podStatus.IP)
|
||||
}
|
||||
|
||||
expectedStatesCount := 0
|
||||
var expectedState kubecontainer.ContainerState
|
||||
if test.expectRunning {
|
||||
expectedState = kubecontainer.ContainerStateRunning
|
||||
} else if test.expectUnknown {
|
||||
expectedState = kubecontainer.ContainerStateUnknown
|
||||
} else {
|
||||
t.Errorf("Some state has to be expected")
|
||||
}
|
||||
for _, containerStatus := range podStatus.ContainerStatuses {
|
||||
if containerStatus.State == expectedState {
|
||||
expectedStatesCount++
|
||||
}
|
||||
}
|
||||
if expectedStatesCount < 1 {
|
||||
t.Errorf("Invalid count of containers with expected state")
|
||||
}
|
||||
}
|
||||
}
1929
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
52
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager_unsupported.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
// +build !linux,!windows

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dockertools

import (
	dockertypes "github.com/docker/engine-api/types"
	dockercontainer "github.com/docker/engine-api/types/container"

	"k8s.io/kubernetes/pkg/api/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// These two functions are OS specific (for now at least)
func updateHostConfig(hc *dockercontainer.HostConfig, opts *kubecontainer.RunContainerOptions) {
}

func DefaultMemorySwap() int64 {
	return -1
}

func getContainerIP(container *dockertypes.ContainerJSON) string {
	return ""
}

func getNetworkingMode() string {
	return ""
}

func containerProvidesPodIP(name *KubeletContainerName) bool {
	return false
}

// Returns nil as Seccomp and AppArmor security options are not supported on this platform
func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) {
	return nil, nil
}
75
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_manager_windows.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
// +build windows

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dockertools

import (
	"os"

	dockertypes "github.com/docker/engine-api/types"
	dockercontainer "github.com/docker/engine-api/types/container"

	"k8s.io/kubernetes/pkg/api/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// These two functions are OS specific (for now at least)
func updateHostConfig(hc *dockercontainer.HostConfig, opts *kubecontainer.RunContainerOptions) {
	// There is no /etc/resolv.conf on Windows, so DNS and DNSSearch options have to be passed to the Docker runtime instead
	hc.DNS = opts.DNS
	hc.DNSSearch = opts.DNSSearch

	// MemorySwap == -1 is not currently supported in Docker 1.14 on Windows
	// https://github.com/docker/docker/blob/master/daemon/daemon_windows.go#L175
	hc.Resources.MemorySwap = 0
}

func DefaultMemorySwap() int64 {
	return 0
}

func getContainerIP(container *dockertypes.ContainerJSON) string {
	if container.NetworkSettings != nil {
		for _, network := range container.NetworkSettings.Networks {
			if network.IPAddress != "" {
				return network.IPAddress
			}
		}
	}
	return ""
}

func getNetworkingMode() string {
	// Allow override via env variable. Otherwise, use a default "kubenet" network
	netMode := os.Getenv("CONTAINER_NETWORK")
	if netMode == "" {
		netMode = "kubenet"
	}
	return netMode
}

// Infrastructure containers are not supported on Windows. For this reason, we
// make sure to not grab the infra container's IP for the pod.
func containerProvidesPodIP(name *KubeletContainerName) bool {
	return name.ContainerName != PodInfraContainerName
}

// Returns nil as both Seccomp and AppArmor security options are not valid on Windows
func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) {
	return nil, nil
}
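For reference, the CONTAINER_NETWORK override in getNetworkingMode above can be exercised in isolation; the sketch below copies the env-variable name and the "kubenet" default from the source, while everything else is illustrative.

package main

import (
	"fmt"
	"os"
)

// networkingMode returns the value of CONTAINER_NETWORK, defaulting to "kubenet",
// mirroring the Windows getNetworkingMode above.
func networkingMode() string {
	if mode := os.Getenv("CONTAINER_NETWORK"); mode != "" {
		return mode
	}
	return "kubenet"
}

func main() {
	os.Setenv("CONTAINER_NETWORK", "nat")
	fmt.Println(networkingMode()) // nat
	os.Unsetenv("CONTAINER_NETWORK")
	fmt.Println(networkingMode()) // kubenet
}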
1004
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
168
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/exec.go
generated
vendored
Normal file
@@ -0,0 +1,168 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
"github.com/golang/glog"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
utilexec "k8s.io/kubernetes/pkg/util/exec"
|
||||
"k8s.io/kubernetes/pkg/util/term"
|
||||
)
|
||||
|
||||
// ExecHandler knows how to execute a command in a running Docker container.
|
||||
type ExecHandler interface {
|
||||
ExecInContainer(client DockerInterface, container *dockertypes.ContainerJSON, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size, timeout time.Duration) error
|
||||
}
|
||||
|
||||
// NsenterExecHandler executes commands in Docker containers using nsenter.
|
||||
type NsenterExecHandler struct{}
|
||||
|
||||
// TODO should we support nsenter in a container, running with elevated privs and --pid=host?
|
||||
func (*NsenterExecHandler) ExecInContainer(client DockerInterface, container *dockertypes.ContainerJSON, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size, timeout time.Duration) error {
|
||||
nsenter, err := exec.LookPath("nsenter")
|
||||
if err != nil {
|
||||
return fmt.Errorf("exec unavailable - unable to locate nsenter")
|
||||
}
|
||||
|
||||
containerPid := container.State.Pid
|
||||
|
||||
// TODO what if the container doesn't have `env`???
|
||||
args := []string{"-t", fmt.Sprintf("%d", containerPid), "-m", "-i", "-u", "-n", "-p", "--", "env", "-i"}
|
||||
args = append(args, fmt.Sprintf("HOSTNAME=%s", container.Config.Hostname))
|
||||
args = append(args, container.Config.Env...)
|
||||
args = append(args, cmd...)
|
||||
command := exec.Command(nsenter, args...)
|
||||
if tty {
|
||||
p, err := kubecontainer.StartPty(command)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer p.Close()
|
||||
|
||||
// make sure to close the stdout stream
|
||||
defer stdout.Close()
|
||||
|
||||
kubecontainer.HandleResizing(resize, func(size term.Size) {
|
||||
term.SetSize(p.Fd(), size)
|
||||
})
|
||||
|
||||
if stdin != nil {
|
||||
go io.Copy(p, stdin)
|
||||
}
|
||||
|
||||
if stdout != nil {
|
||||
go io.Copy(stdout, p)
|
||||
}
|
||||
|
||||
err = command.Wait()
|
||||
} else {
|
||||
if stdin != nil {
|
||||
// Use an os.Pipe here as it returns true *os.File objects.
|
||||
// This way, if you run 'kubectl exec <pod> -i bash' (no tty) and type 'exit',
|
||||
// the call below to command.Run() can unblock because its Stdin is the read half
|
||||
// of the pipe.
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go io.Copy(w, stdin)
|
||||
|
||||
command.Stdin = r
|
||||
}
|
||||
if stdout != nil {
|
||||
command.Stdout = stdout
|
||||
}
|
||||
if stderr != nil {
|
||||
command.Stderr = stderr
|
||||
}
|
||||
|
||||
err = command.Run()
|
||||
}
|
||||
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
return &utilexec.ExitErrorWrapper{ExitError: exitErr}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NativeExecHandler executes commands in Docker containers using Docker's exec API.
|
||||
type NativeExecHandler struct{}
|
||||
|
||||
func (*NativeExecHandler) ExecInContainer(client DockerInterface, container *dockertypes.ContainerJSON, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size, timeout time.Duration) error {
|
||||
createOpts := dockertypes.ExecConfig{
|
||||
Cmd: cmd,
|
||||
AttachStdin: stdin != nil,
|
||||
AttachStdout: stdout != nil,
|
||||
AttachStderr: stderr != nil,
|
||||
Tty: tty,
|
||||
}
|
||||
execObj, err := client.CreateExec(container.ID, createOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to exec in container - Exec setup failed - %v", err)
|
||||
}
|
||||
|
||||
// Have to start this before the call to client.StartExec because client.StartExec is a blocking
|
||||
// call :-( Otherwise, resize events don't get processed and the terminal never resizes.
|
||||
kubecontainer.HandleResizing(resize, func(size term.Size) {
|
||||
client.ResizeExecTTY(execObj.ID, int(size.Height), int(size.Width))
|
||||
})
|
||||
|
||||
startOpts := dockertypes.ExecStartCheck{Detach: false, Tty: tty}
|
||||
streamOpts := StreamOptions{
|
||||
InputStream: stdin,
|
||||
OutputStream: stdout,
|
||||
ErrorStream: stderr,
|
||||
RawTerminal: tty,
|
||||
}
|
||||
err = client.StartExec(execObj.ID, startOpts, streamOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(2 * time.Second)
|
||||
defer ticker.Stop()
|
||||
count := 0
|
||||
for {
|
||||
inspect, err2 := client.InspectExec(execObj.ID)
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
if !inspect.Running {
|
||||
if inspect.ExitCode != 0 {
|
||||
err = &dockerExitError{inspect}
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
count++
|
||||
if count == 5 {
|
||||
glog.Errorf("Exec session %s in container %s terminated but process still running!", execObj.ID, container.ID)
|
||||
break
|
||||
}
|
||||
|
||||
<-ticker.C
|
||||
}
|
||||
|
||||
return err
|
||||
}
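The exit-polling loop at the end of NativeExecHandler.ExecInContainer above (inspect every two seconds, give up after five ticks) can be factored into a small helper; the sketch below is a hedged illustration of that pattern, not part of the vendored file.

package main

import (
	"fmt"
	"time"
)

// pollUntil calls done() on each tick and returns true if done() reported
// completion before maxTicks elapsed, mirroring the ticker loop above.
func pollUntil(interval time.Duration, maxTicks int, done func() bool) bool {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for i := 0; i < maxTicks; i++ {
		if done() {
			return true
		}
		<-ticker.C
	}
	return done()
}

func main() {
	start := time.Now()
	finished := pollUntil(100*time.Millisecond, 5, func() bool {
		return time.Since(start) > 250*time.Millisecond
	})
	fmt.Println("finished:", finished)
}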
629
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/fake_docker_client.go
generated
vendored
Normal file
@@ -0,0 +1,629 @@
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
dockercontainer "github.com/docker/engine-api/types/container"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
||||
type calledDetail struct {
|
||||
name string
|
||||
arguments []interface{}
|
||||
}
|
||||
|
||||
// NewCalledDetail create a new call detail item.
|
||||
func NewCalledDetail(name string, arguments []interface{}) calledDetail {
|
||||
return calledDetail{name: name, arguments: arguments}
|
||||
}
|
||||
|
||||
// FakeDockerClient is a simple fake docker client, so that kubelet can be run for testing without requiring a real docker setup.
|
||||
type FakeDockerClient struct {
|
||||
sync.Mutex
|
||||
Clock clock.Clock
|
||||
RunningContainerList []dockertypes.Container
|
||||
ExitedContainerList []dockertypes.Container
|
||||
ContainerMap map[string]*dockertypes.ContainerJSON
|
||||
Image *dockertypes.ImageInspect
|
||||
Images []dockertypes.Image
|
||||
Errors map[string]error
|
||||
called []calledDetail
|
||||
pulled []string
|
||||
|
||||
// Created, Stopped and Removed all container docker ID
|
||||
Created []string
|
||||
Started []string
|
||||
Stopped []string
|
||||
Removed []string
|
||||
VersionInfo dockertypes.Version
|
||||
Information dockertypes.Info
|
||||
ExecInspect *dockertypes.ContainerExecInspect
|
||||
execCmd []string
|
||||
EnableSleep bool
|
||||
ImageHistoryMap map[string][]dockertypes.ImageHistory
|
||||
}
|
||||
|
||||
// We don't check the docker version now, so just set the fake docker client's version to 1.8.1.
// Note that if we ever add a minimum docker version requirement, this should be updated as well.
|
||||
const fakeDockerVersion = "1.8.1"
|
||||
|
||||
func NewFakeDockerClient() *FakeDockerClient {
|
||||
return NewFakeDockerClientWithVersion(fakeDockerVersion, minimumDockerAPIVersion)
|
||||
}
|
||||
|
||||
func NewFakeDockerClientWithClock(c clock.Clock) *FakeDockerClient {
|
||||
return newClientWithVersionAndClock(fakeDockerVersion, minimumDockerAPIVersion, c)
|
||||
}
|
||||
|
||||
func NewFakeDockerClientWithVersion(version, apiVersion string) *FakeDockerClient {
|
||||
return newClientWithVersionAndClock(version, apiVersion, clock.RealClock{})
|
||||
}
|
||||
|
||||
func newClientWithVersionAndClock(version, apiVersion string, c clock.Clock) *FakeDockerClient {
|
||||
return &FakeDockerClient{
|
||||
VersionInfo: dockertypes.Version{Version: version, APIVersion: apiVersion},
|
||||
Errors: make(map[string]error),
|
||||
ContainerMap: make(map[string]*dockertypes.ContainerJSON),
|
||||
Clock: c,
|
||||
// default this to an empty result, so that we never have a nil non-error response from InspectImage
|
||||
Image: &dockertypes.ImageInspect{},
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) InjectError(fn string, err error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.Errors[fn] = err
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) InjectErrors(errs map[string]error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
for fn, err := range errs {
|
||||
f.Errors[fn] = err
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) ClearErrors() {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.Errors = map[string]error{}
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) ClearCalls() {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = []calledDetail{}
|
||||
f.Stopped = []string{}
|
||||
f.pulled = []string{}
|
||||
f.Created = []string{}
|
||||
f.Removed = []string{}
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) getCalledNames() []string {
|
||||
names := []string{}
|
||||
for _, detail := range f.called {
|
||||
names = append(names, detail.name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// Because the new data type returned by engine-api is too complex to manually initialize, we need a
|
||||
// fake container which is easier to initialize.
|
||||
type FakeContainer struct {
|
||||
ID string
|
||||
Name string
|
||||
Running bool
|
||||
ExitCode int
|
||||
Pid int
|
||||
CreatedAt time.Time
|
||||
StartedAt time.Time
|
||||
FinishedAt time.Time
|
||||
Config *dockercontainer.Config
|
||||
HostConfig *dockercontainer.HostConfig
|
||||
}
|
||||
|
||||
// convertFakeContainer converts the fake container to real container
|
||||
func convertFakeContainer(f *FakeContainer) *dockertypes.ContainerJSON {
|
||||
if f.Config == nil {
|
||||
f.Config = &dockercontainer.Config{}
|
||||
}
|
||||
if f.HostConfig == nil {
|
||||
f.HostConfig = &dockercontainer.HostConfig{}
|
||||
}
|
||||
return &dockertypes.ContainerJSON{
|
||||
ContainerJSONBase: &dockertypes.ContainerJSONBase{
|
||||
ID: f.ID,
|
||||
Name: f.Name,
|
||||
State: &dockertypes.ContainerState{
|
||||
Running: f.Running,
|
||||
ExitCode: f.ExitCode,
|
||||
Pid: f.Pid,
|
||||
StartedAt: dockerTimestampToString(f.StartedAt),
|
||||
FinishedAt: dockerTimestampToString(f.FinishedAt),
|
||||
},
|
||||
Created: dockerTimestampToString(f.CreatedAt),
|
||||
HostConfig: f.HostConfig,
|
||||
},
|
||||
Config: f.Config,
|
||||
NetworkSettings: &dockertypes.NetworkSettings{},
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) SetFakeContainers(containers []*FakeContainer) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
// Reset the lists and the map.
|
||||
f.ContainerMap = map[string]*dockertypes.ContainerJSON{}
|
||||
f.RunningContainerList = []dockertypes.Container{}
|
||||
f.ExitedContainerList = []dockertypes.Container{}
|
||||
|
||||
for i := range containers {
|
||||
c := containers[i]
|
||||
f.ContainerMap[c.ID] = convertFakeContainer(c)
|
||||
container := dockertypes.Container{
|
||||
Names: []string{c.Name},
|
||||
ID: c.ID,
|
||||
}
|
||||
if c.Running {
|
||||
f.RunningContainerList = append(f.RunningContainerList, container)
|
||||
} else {
|
||||
f.ExitedContainerList = append(f.ExitedContainerList, container)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) SetFakeRunningContainers(containers []*FakeContainer) {
|
||||
for _, c := range containers {
|
||||
c.Running = true
|
||||
}
|
||||
f.SetFakeContainers(containers)
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) AssertCalls(calls []string) (err error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
|
||||
if !reflect.DeepEqual(calls, f.getCalledNames()) {
|
||||
err = fmt.Errorf("expected %#v, got %#v", calls, f.getCalledNames())
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) AssertCallDetails(calls ...calledDetail) (err error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
|
||||
if !reflect.DeepEqual(calls, f.called) {
|
||||
err = fmt.Errorf("expected %#v, got %#v", calls, f.called)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) AssertCreated(created []string) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
|
||||
actualCreated := []string{}
|
||||
for _, c := range f.Created {
|
||||
dockerName, _, err := ParseDockerName(c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
actualCreated = append(actualCreated, dockerName.ContainerName)
|
||||
}
|
||||
sort.StringSlice(created).Sort()
|
||||
sort.StringSlice(actualCreated).Sort()
|
||||
if !reflect.DeepEqual(created, actualCreated) {
|
||||
return fmt.Errorf("expected %#v, got %#v", created, actualCreated)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) AssertStarted(started []string) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
sort.StringSlice(started).Sort()
|
||||
sort.StringSlice(f.Started).Sort()
|
||||
if !reflect.DeepEqual(started, f.Started) {
|
||||
return fmt.Errorf("expected %#v, got %#v", started, f.Started)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) AssertStopped(stopped []string) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
sort.StringSlice(stopped).Sort()
|
||||
sort.StringSlice(f.Stopped).Sort()
|
||||
if !reflect.DeepEqual(stopped, f.Stopped) {
|
||||
return fmt.Errorf("expected %#v, got %#v", stopped, f.Stopped)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) popError(op string) error {
|
||||
if f.Errors == nil {
|
||||
return nil
|
||||
}
|
||||
err, ok := f.Errors[op]
|
||||
if ok {
|
||||
delete(f.Errors, op)
|
||||
return err
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ListContainers is a test-spy implementation of DockerInterface.ListContainers.
|
||||
// It adds an entry "list" to the internal method call record.
|
||||
func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "list"})
|
||||
err := f.popError("list")
|
||||
containerList := append([]dockertypes.Container{}, f.RunningContainerList...)
|
||||
if options.All {
|
||||
// The returned list is not fully sorted, but containers with the same name are in order;
// that is enough for us now.
|
||||
// TODO(random-liu): Is a fully sorted array needed?
|
||||
containerList = append(containerList, f.ExitedContainerList...)
|
||||
}
|
||||
return containerList, err
|
||||
}
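A hedged sketch of how a test might drive FakeDockerClient with the helpers defined above; it assumes it lives in package dockertools with fmt and dockertypes imported as in this file, and the container name is made up for illustration.

// exampleFakeDockerClientUsage is not part of the vendored file; it only exercises
// functions defined above: NewFakeDockerClient, SetFakeRunningContainers,
// ListContainers, and AssertCalls.
func exampleFakeDockerClientUsage() {
	client := NewFakeDockerClient()
	client.SetFakeRunningContainers([]*FakeContainer{
		{ID: "abc123", Name: "/k8s_POD.1234_foo_new_uid_0"},
	})
	containers, _ := client.ListContainers(dockertypes.ContainerListOptions{})
	fmt.Println(len(containers))                      // 1
	fmt.Println(client.AssertCalls([]string{"list"})) // <nil> when only "list" was recorded
}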
|
||||
|
||||
// InspectContainer is a test-spy implementation of DockerInterface.InspectContainer.
|
||||
// It adds an entry "inspect" to the internal method call record.
|
||||
func (f *FakeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJSON, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "inspect_container"})
|
||||
err := f.popError("inspect_container")
|
||||
if container, ok := f.ContainerMap[id]; ok {
|
||||
return container, err
|
||||
}
|
||||
if err != nil {
|
||||
// Use the custom error if it exists.
|
||||
return nil, err
|
||||
}
|
||||
return nil, fmt.Errorf("container %q not found", id)
|
||||
}
|
||||
|
||||
// InspectImageByRef is a test-spy implementation of DockerInterface.InspectImageByRef.
|
||||
// It adds an entry "inspect" to the internal method call record.
|
||||
func (f *FakeDockerClient) InspectImageByRef(name string) (*dockertypes.ImageInspect, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "inspect_image"})
|
||||
err := f.popError("inspect_image")
|
||||
return f.Image, err
|
||||
}
|
||||
|
||||
// InspectImageByID is a test-spy implementation of DockerInterface.InspectImageByID.
|
||||
// It adds an entry "inspect" to the internal method call record.
|
||||
func (f *FakeDockerClient) InspectImageByID(name string) (*dockertypes.ImageInspect, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "inspect_image"})
|
||||
err := f.popError("inspect_image")
|
||||
return f.Image, err
|
||||
}
|
||||
|
||||
// normalSleep sleeps for a random duration drawn from a normal distribution with the given
// mean and stddev (in milliseconds); it never sleeps for less than cutOffMillis.
|
||||
func (f *FakeDockerClient) normalSleep(mean, stdDev, cutOffMillis int) {
|
||||
if !f.EnableSleep {
|
||||
return
|
||||
}
|
||||
cutoff := (time.Duration)(cutOffMillis) * time.Millisecond
|
||||
delay := (time.Duration)(rand.NormFloat64()*float64(stdDev)+float64(mean)) * time.Millisecond
|
||||
if delay < cutoff {
|
||||
delay = cutoff
|
||||
}
|
||||
time.Sleep(delay)
|
||||
}
|
||||
|
||||
// CreateContainer is a test-spy implementation of DockerInterface.CreateContainer.
|
||||
// It adds an entry "create" to the internal method call record.
|
||||
func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "create"})
|
||||
if err := f.popError("create"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// This is not a very good fake. We'll just add this container's name to the list.
|
||||
// Docker likes to add a '/', so copy that behavior.
|
||||
name := "/" + c.Name
|
||||
id := name
|
||||
f.Created = append(f.Created, name)
|
||||
// The newest container should be in front, because we assume so in GetPodStatus()
|
||||
f.RunningContainerList = append([]dockertypes.Container{
|
||||
{ID: name, Names: []string{name}, Image: c.Config.Image, Labels: c.Config.Labels},
|
||||
}, f.RunningContainerList...)
|
||||
f.ContainerMap[name] = convertFakeContainer(&FakeContainer{
|
||||
ID: id, Name: name, Config: c.Config, HostConfig: c.HostConfig, CreatedAt: f.Clock.Now()})
|
||||
f.normalSleep(100, 25, 25)
|
||||
return &dockertypes.ContainerCreateResponse{ID: id}, nil
|
||||
}
|
||||
|
||||
// StartContainer is a test-spy implementation of DockerInterface.StartContainer.
|
||||
// It adds an entry "start" to the internal method call record.
|
||||
func (f *FakeDockerClient) StartContainer(id string) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "start"})
|
||||
if err := f.popError("start"); err != nil {
|
||||
return err
|
||||
}
|
||||
f.Started = append(f.Started, id)
|
||||
container, ok := f.ContainerMap[id]
|
||||
if !ok {
|
||||
container = convertFakeContainer(&FakeContainer{ID: id, Name: id})
|
||||
}
|
||||
container.State.Running = true
|
||||
container.State.Pid = os.Getpid()
|
||||
container.State.StartedAt = dockerTimestampToString(f.Clock.Now())
|
||||
container.NetworkSettings.IPAddress = "2.3.4.5"
|
||||
f.ContainerMap[id] = container
|
||||
f.updateContainerStatus(id, statusRunningPrefix)
|
||||
f.normalSleep(200, 50, 50)
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopContainer is a test-spy implementation of DockerInterface.StopContainer.
|
||||
// It adds an entry "stop" to the internal method call record.
|
||||
func (f *FakeDockerClient) StopContainer(id string, timeout int) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "stop"})
|
||||
if err := f.popError("stop"); err != nil {
|
||||
return err
|
||||
}
|
||||
f.Stopped = append(f.Stopped, id)
|
||||
// Container status should be Updated before container moved to ExitedContainerList
|
||||
f.updateContainerStatus(id, statusExitedPrefix)
|
||||
var newList []dockertypes.Container
|
||||
for _, container := range f.RunningContainerList {
|
||||
if container.ID == id {
|
||||
// The newest exited container should be in front. Because we assume so in GetPodStatus()
|
||||
f.ExitedContainerList = append([]dockertypes.Container{container}, f.ExitedContainerList...)
|
||||
continue
|
||||
}
|
||||
newList = append(newList, container)
|
||||
}
|
||||
f.RunningContainerList = newList
|
||||
container, ok := f.ContainerMap[id]
|
||||
if !ok {
|
||||
container = convertFakeContainer(&FakeContainer{
|
||||
ID: id,
|
||||
Name: id,
|
||||
Running: false,
|
||||
StartedAt: time.Now().Add(-time.Second),
|
||||
FinishedAt: time.Now(),
|
||||
})
|
||||
} else {
|
||||
container.State.FinishedAt = dockerTimestampToString(f.Clock.Now())
|
||||
container.State.Running = false
|
||||
}
|
||||
f.ContainerMap[id] = container
|
||||
f.normalSleep(200, 50, 50)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "remove"})
|
||||
err := f.popError("remove")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i := range f.ExitedContainerList {
|
||||
if f.ExitedContainerList[i].ID == id {
|
||||
delete(f.ContainerMap, id)
|
||||
f.ExitedContainerList = append(f.ExitedContainerList[:i], f.ExitedContainerList[i+1:]...)
|
||||
f.Removed = append(f.Removed, id)
|
||||
return nil
|
||||
}
|
||||
|
||||
}
|
||||
// To be a good fake, report error if container is not stopped.
|
||||
return fmt.Errorf("container not stopped")
|
||||
}
|
||||
|
||||
// Logs is a test-spy implementation of DockerInterface.Logs.
|
||||
// It adds an entry "logs" to the internal method call record.
|
||||
func (f *FakeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "logs"})
|
||||
return f.popError("logs")
|
||||
}
|
||||
|
||||
// PullImage is a test-spy implementation of DockerInterface.PullImage.
|
||||
// It adds an entry "pull" to the internal method call record.
|
||||
func (f *FakeDockerClient) PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "pull"})
|
||||
err := f.popError("pull")
|
||||
if err == nil {
|
||||
authJson, _ := json.Marshal(auth)
|
||||
f.Image = &dockertypes.ImageInspect{
|
||||
ID: image,
|
||||
RepoTags: []string{image},
|
||||
}
|
||||
f.pulled = append(f.pulled, fmt.Sprintf("%s using %s", image, string(authJson)))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) Version() (*dockertypes.Version, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
return &f.VersionInfo, f.popError("version")
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) Info() (*dockertypes.Info, error) {
|
||||
return &f.Information, nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.execCmd = opts.Cmd
|
||||
f.called = append(f.called, calledDetail{name: "create_exec"})
|
||||
return &dockertypes.ContainerExecCreateResponse{ID: "12345678"}, nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "start_exec"})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "attach"})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecInspect, error) {
|
||||
return f.ExecInspect, f.popError("inspect_exec")
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) {
|
||||
f.called = append(f.called, calledDetail{name: "list_images"})
|
||||
err := f.popError("list_images")
|
||||
return f.Images, err
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) {
|
||||
f.called = append(f.called, calledDetail{name: "remove_image", arguments: []interface{}{image, opts}})
|
||||
err := f.popError("remove_image")
|
||||
if err == nil {
|
||||
for i := range f.Images {
|
||||
if f.Images[i].ID == image {
|
||||
f.Images = append(f.Images[:i], f.Images[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return []dockertypes.ImageDelete{{Deleted: image}}, err
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) InjectImages(images []dockertypes.Image) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.Images = append(f.Images, images...)
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) updateContainerStatus(id, status string) {
|
||||
for i := range f.RunningContainerList {
|
||||
if f.RunningContainerList[i].ID == id {
|
||||
f.RunningContainerList[i].Status = status
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) ResizeExecTTY(id string, height, width int) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "resize_exec"})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) ResizeContainerTTY(id string, height, width int) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "resize_container"})
|
||||
return nil
|
||||
}
|
||||
|
||||
// FakeDockerPuller is a stub implementation of DockerPuller.
|
||||
type FakeDockerPuller struct {
|
||||
sync.Mutex
|
||||
|
||||
HasImages []string
|
||||
ImagesPulled []string
|
||||
|
||||
// Every pull will return the first error here, and then reslice
|
||||
// to remove it. Will give nil errors if this slice is empty.
|
||||
ErrorsToInject []error
|
||||
}
|
||||
|
||||
// Pull records the image pull attempt, and optionally injects an error.
|
||||
func (f *FakeDockerPuller) Pull(image string, secrets []v1.Secret) (err error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.ImagesPulled = append(f.ImagesPulled, image)
|
||||
|
||||
if len(f.ErrorsToInject) > 0 {
|
||||
err = f.ErrorsToInject[0]
|
||||
f.ErrorsToInject = f.ErrorsToInject[1:]
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *FakeDockerPuller) GetImageRef(name string) (string, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
if f.HasImages == nil {
|
||||
return name, nil
|
||||
}
|
||||
for _, s := range f.HasImages {
|
||||
if s == name {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
func (f *FakeDockerClient) ImageHistory(id string) ([]dockertypes.ImageHistory, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.called = append(f.called, calledDetail{name: "image_history"})
|
||||
history := f.ImageHistoryMap[id]
|
||||
return history, nil
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) InjectImageHistory(data map[string][]dockertypes.ImageHistory) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.ImageHistoryMap = data
|
||||
}
|
||||
|
||||
// dockerTimestampToString converts the timestamp to string
|
||||
func dockerTimestampToString(t time.Time) string {
|
||||
return t.Format(time.RFC3339Nano)
|
||||
}
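dockerTimestampToString above emits RFC3339Nano strings; the following self-contained, illustrative sketch shows a round trip that a consumer of the fake's ContainerState fields might perform.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Format a timestamp the way dockerTimestampToString does...
	s := time.Now().UTC().Format(time.RFC3339Nano)
	// ...and parse it back as a reader of the fake's timestamps would.
	t, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		panic(err)
	}
	fmt.Println(s, t.UTC())
}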
78
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/fake_manager.go
generated
vendored
Normal file
@@ -0,0 +1,78 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/cache"
|
||||
"k8s.io/kubernetes/pkg/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/util/oom"
|
||||
"k8s.io/kubernetes/pkg/util/procfs"
|
||||
)
|
||||
|
||||
func NewFakeDockerManager(
|
||||
client DockerInterface,
|
||||
recorder record.EventRecorder,
|
||||
livenessManager proberesults.Manager,
|
||||
containerRefManager *kubecontainer.RefManager,
|
||||
machineInfo *cadvisorapi.MachineInfo,
|
||||
podInfraContainerImage string,
|
||||
qps float32,
|
||||
burst int,
|
||||
containerLogsDir string,
|
||||
osInterface kubecontainer.OSInterface,
|
||||
networkPlugin network.NetworkPlugin,
|
||||
runtimeHelper kubecontainer.RuntimeHelper,
|
||||
httpClient kubetypes.HttpGetter, imageBackOff *flowcontrol.Backoff) *DockerManager {
|
||||
|
||||
fakeOOMAdjuster := oom.NewFakeOOMAdjuster()
|
||||
fakeProcFs := procfs.NewFakeProcFS()
|
||||
fakePodGetter := &fakePodGetter{}
|
||||
dm := NewDockerManager(client, recorder, livenessManager, containerRefManager, fakePodGetter, machineInfo, podInfraContainerImage, qps,
|
||||
burst, containerLogsDir, osInterface, networkPlugin, runtimeHelper, httpClient, &NativeExecHandler{},
|
||||
fakeOOMAdjuster, fakeProcFs, false, imageBackOff, false, false, true, "/var/lib/kubelet/seccomp")
|
||||
dm.dockerPuller = &FakeDockerPuller{}
|
||||
|
||||
// ttl of version cache is set to 0 so we always call version api directly in tests.
|
||||
dm.versionCache = cache.NewObjectCache(
|
||||
func() (interface{}, error) {
|
||||
return dm.getVersionInfo()
|
||||
},
|
||||
0,
|
||||
)
|
||||
return dm
|
||||
}
|
||||
|
||||
type fakePodGetter struct {
|
||||
pods map[types.UID]*v1.Pod
|
||||
}
|
||||
|
||||
func newFakePodGetter() *fakePodGetter {
|
||||
return &fakePodGetter{make(map[types.UID]*v1.Pod)}
|
||||
}
|
||||
|
||||
func (f *fakePodGetter) GetPodByUID(uid types.UID) (*v1.Pod, bool) {
|
||||
pod, found := f.pods[uid]
|
||||
return pod, found
|
||||
}
3
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/fixtures/seccomp/sub/subtest
generated
vendored
Normal file
@@ -0,0 +1,3 @@
{
  "abc": "def"
}
3
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/fixtures/seccomp/test
generated
vendored
Normal file
@@ -0,0 +1,3 @@
{
  "foo": "bar"
}
102
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/images.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
runtime "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
// imageStatsProvider exposes stats about all images currently available.
|
||||
type imageStatsProvider struct {
|
||||
sync.Mutex
|
||||
// layers caches the current layers, key is the layer ID.
|
||||
layers map[string]*dockertypes.ImageHistory
|
||||
// imageToLayerIDs maps image to its layer IDs.
|
||||
imageToLayerIDs map[string][]string
|
||||
// Docker remote API client
|
||||
c DockerInterface
|
||||
}
|
||||
|
||||
func newImageStatsProvider(c DockerInterface) *imageStatsProvider {
|
||||
return &imageStatsProvider{
|
||||
layers: make(map[string]*dockertypes.ImageHistory),
|
||||
imageToLayerIDs: make(map[string][]string),
|
||||
c: c,
|
||||
}
|
||||
}
|
||||
|
||||
func (isp *imageStatsProvider) ImageStats() (*runtime.ImageStats, error) {
|
||||
images, err := isp.c.ListImages(dockertypes.ImageListOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list docker images - %v", err)
|
||||
}
|
||||
// Take the lock to protect the cache
|
||||
isp.Lock()
|
||||
defer isp.Unlock()
|
||||
// Create new cache each time, this is a little more memory consuming, but:
|
||||
// * ImageStats is only called every 10 seconds
|
||||
// * We use pointers and reference to copy cache elements.
|
||||
// The memory usage should be acceptable.
|
||||
// TODO(random-liu): Add more logic to implement in place cache update.
|
||||
newLayers := make(map[string]*dockertypes.ImageHistory)
|
||||
newImageToLayerIDs := make(map[string][]string)
|
||||
for _, image := range images {
|
||||
layerIDs, ok := isp.imageToLayerIDs[image.ID]
|
||||
if !ok {
|
||||
// Get information about the various layers of the given docker image.
|
||||
history, err := isp.c.ImageHistory(image.ID)
|
||||
if err != nil {
|
||||
// Skip the image and inspect again in next ImageStats if the image is still there
|
||||
glog.V(2).Infof("failed to get history of docker image %+v - %v", image, err)
|
||||
continue
|
||||
}
|
||||
// Cache each layer
|
||||
for i := range history {
|
||||
layer := &history[i]
|
||||
key := layer.ID
|
||||
// Some of the layers are empty.
|
||||
// We are hoping that these layers are unique to each image.
|
||||
// Still keying with the CreatedBy field to be safe.
|
||||
if key == "" || key == "<missing>" {
|
||||
key = key + layer.CreatedBy
|
||||
}
|
||||
layerIDs = append(layerIDs, key)
|
||||
newLayers[key] = layer
|
||||
}
|
||||
} else {
|
||||
for _, layerID := range layerIDs {
|
||||
newLayers[layerID] = isp.layers[layerID]
|
||||
}
|
||||
}
|
||||
newImageToLayerIDs[image.ID] = layerIDs
|
||||
}
|
||||
ret := &runtime.ImageStats{}
|
||||
// Calculate the total storage bytes
|
||||
for _, layer := range newLayers {
|
||||
ret.TotalStorageBytes += uint64(layer.Size)
|
||||
}
|
||||
// Update current cache
|
||||
isp.layers = newLayers
|
||||
isp.imageToLayerIDs = newImageToLayerIDs
|
||||
return ret, nil
|
||||
}
|
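// Illustrative sketch, not part of the vendored diff: the layer-keying rule used by
// ImageStats above, pulled out as a standalone helper. Empty or "<missing>" layer IDs
// are disambiguated with CreatedBy, so layers shared between images are counted once
// while anonymous layers from different images are not collapsed into one entry.
func layerKeySketch(layer dockertypes.ImageHistory) string {
	key := layer.ID
	if key == "" || key == "<missing>" {
		key = key + layer.CreatedBy
	}
	return key
}

// With this key, summing the Size of each unique layer reproduces TotalStorageBytes;
// for the fixtures in images_test.go below (layers of 100, 200, 100, 200, 300 and 400
// bytes after de-duplication) the expected total is 1300 bytes.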
334
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/images_test.go
generated
vendored
Normal file
|
@ -0,0 +1,334 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestImageStatsNoImages(t *testing.T) {
|
||||
fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2")
|
||||
isp := newImageStatsProvider(fakeDockerClient)
|
||||
st, err := isp.ImageStats()
|
||||
as := assert.New(t)
|
||||
as.NoError(err)
|
||||
as.NoError(fakeDockerClient.AssertCalls([]string{"list_images"}))
|
||||
as.Equal(st.TotalStorageBytes, uint64(0))
|
||||
}
|
||||
|
||||
func TestImageStatsWithImages(t *testing.T) {
|
||||
fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2")
|
||||
fakeHistoryData := map[string][]dockertypes.ImageHistory{
|
||||
"busybox": {
|
||||
{
|
||||
ID: "0123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 100,
|
||||
},
|
||||
{
|
||||
ID: "0123457",
|
||||
CreatedBy: "duplicate",
|
||||
Size: 200,
|
||||
},
|
||||
{
|
||||
ID: "<missing>",
|
||||
CreatedBy: "baz",
|
||||
Size: 300,
|
||||
},
|
||||
},
|
||||
"kubelet": {
|
||||
{
|
||||
ID: "1123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 200,
|
||||
},
|
||||
{
|
||||
ID: "<missing>",
|
||||
CreatedBy: "1baz",
|
||||
Size: 400,
|
||||
},
|
||||
},
|
||||
"busybox-new": {
|
||||
{
|
||||
ID: "01234567",
|
||||
CreatedBy: "foo",
|
||||
Size: 100,
|
||||
},
|
||||
{
|
||||
ID: "0123457",
|
||||
CreatedBy: "duplicate",
|
||||
Size: 200,
|
||||
},
|
||||
{
|
||||
ID: "<missing>",
|
||||
CreatedBy: "baz",
|
||||
Size: 300,
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeDockerClient.InjectImageHistory(fakeHistoryData)
|
||||
fakeDockerClient.InjectImages([]dockertypes.Image{
|
||||
{
|
||||
ID: "busybox",
|
||||
},
|
||||
{
|
||||
ID: "kubelet",
|
||||
},
|
||||
{
|
||||
ID: "busybox-new",
|
||||
},
|
||||
})
|
||||
isp := newImageStatsProvider(fakeDockerClient)
|
||||
st, err := isp.ImageStats()
|
||||
as := assert.New(t)
|
||||
as.NoError(err)
|
||||
as.NoError(fakeDockerClient.AssertCalls([]string{"list_images", "image_history", "image_history", "image_history"}))
|
||||
const expectedOutput uint64 = 1300
|
||||
as.Equal(expectedOutput, st.TotalStorageBytes, "expected %d, got %d", expectedOutput, st.TotalStorageBytes)
|
||||
}
|
||||
|
||||
func TestImageStatsWithCachedImages(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
oldLayers map[string]*dockertypes.ImageHistory
|
||||
oldImageToLayerIDs map[string][]string
|
||||
images []dockertypes.Image
|
||||
history map[string][]dockertypes.ImageHistory
|
||||
expectedCalls []string
|
||||
expectedLayers map[string]*dockertypes.ImageHistory
|
||||
expectedImageToLayerIDs map[string][]string
|
||||
expectedTotalStorageSize uint64
|
||||
}{
|
||||
{
|
||||
// No cache
|
||||
oldLayers: make(map[string]*dockertypes.ImageHistory),
|
||||
oldImageToLayerIDs: make(map[string][]string),
|
||||
images: []dockertypes.Image{
|
||||
{
|
||||
ID: "busybox",
|
||||
},
|
||||
{
|
||||
ID: "kubelet",
|
||||
},
|
||||
},
|
||||
history: map[string][]dockertypes.ImageHistory{
|
||||
"busybox": {
|
||||
{
|
||||
ID: "0123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 100,
|
||||
},
|
||||
{
|
||||
ID: "<missing>",
|
||||
CreatedBy: "baz",
|
||||
Size: 300,
|
||||
},
|
||||
},
|
||||
"kubelet": {
|
||||
{
|
||||
ID: "1123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 200,
|
||||
},
|
||||
{
|
||||
ID: "<missing>",
|
||||
CreatedBy: "1baz",
|
||||
Size: 400,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedCalls: []string{"list_images", "image_history", "image_history"},
|
||||
expectedLayers: map[string]*dockertypes.ImageHistory{
|
||||
"0123456": {
|
||||
ID: "0123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 100,
|
||||
},
|
||||
"1123456": {
|
||||
ID: "1123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 200,
|
||||
},
|
||||
"<missing>baz": {
|
||||
ID: "<missing>",
|
||||
CreatedBy: "baz",
|
||||
Size: 300,
|
||||
},
|
||||
"<missing>1baz": {
|
||||
ID: "<missing>",
|
||||
CreatedBy: "1baz",
|
||||
Size: 400,
|
||||
},
|
||||
},
|
||||
expectedImageToLayerIDs: map[string][]string{
|
||||
"busybox": {"0123456", "<missing>baz"},
|
||||
"kubelet": {"1123456", "<missing>1baz"},
|
||||
},
|
||||
expectedTotalStorageSize: 1000,
|
||||
},
|
||||
{
|
||||
// Use cache value
|
||||
oldLayers: map[string]*dockertypes.ImageHistory{
|
||||
"0123456": {
|
||||
ID: "0123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 100,
|
||||
},
|
||||
"<missing>baz": {
|
||||
ID: "<missing>",
|
||||
CreatedBy: "baz",
|
||||
Size: 300,
|
||||
},
|
||||
},
|
||||
oldImageToLayerIDs: map[string][]string{
|
||||
"busybox": {"0123456", "<missing>baz"},
|
||||
},
|
||||
images: []dockertypes.Image{
|
||||
{
|
||||
ID: "busybox",
|
||||
},
|
||||
{
|
||||
ID: "kubelet",
|
||||
},
|
||||
},
|
||||
history: map[string][]dockertypes.ImageHistory{
|
||||
"busybox": {
|
||||
{
|
||||
ID: "0123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 100,
|
||||
},
|
||||
{
|
||||
ID: "<missing>",
|
||||
CreatedBy: "baz",
|
||||
Size: 300,
|
||||
},
|
||||
},
|
||||
"kubelet": {
|
||||
{
|
||||
ID: "1123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 200,
|
||||
},
|
||||
{
|
||||
ID: "<missing>",
|
||||
CreatedBy: "1baz",
|
||||
Size: 400,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedCalls: []string{"list_images", "image_history"},
|
||||
expectedLayers: map[string]*dockertypes.ImageHistory{
|
||||
"0123456": {
|
||||
ID: "0123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 100,
|
||||
},
|
||||
"1123456": {
|
||||
ID: "1123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 200,
|
||||
},
|
||||
"<missing>baz": {
|
||||
ID: "<missing>",
|
||||
CreatedBy: "baz",
|
||||
Size: 300,
|
||||
},
|
||||
"<missing>1baz": {
|
||||
ID: "<missing>",
|
||||
CreatedBy: "1baz",
|
||||
Size: 400,
|
||||
},
|
||||
},
|
||||
expectedImageToLayerIDs: map[string][]string{
|
||||
"busybox": {"0123456", "<missing>baz"},
|
||||
"kubelet": {"1123456", "<missing>1baz"},
|
||||
},
|
||||
expectedTotalStorageSize: 1000,
|
||||
},
|
||||
{
|
||||
// Unused cache value
|
||||
oldLayers: map[string]*dockertypes.ImageHistory{
|
||||
"0123456": {
|
||||
ID: "0123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 100,
|
||||
},
|
||||
"<missing>baz": {
|
||||
ID: "<missing>",
|
||||
CreatedBy: "baz",
|
||||
Size: 300,
|
||||
},
|
||||
},
|
||||
oldImageToLayerIDs: map[string][]string{
|
||||
"busybox": {"0123456", "<missing>baz"},
|
||||
},
|
||||
images: []dockertypes.Image{
|
||||
{
|
||||
ID: "kubelet",
|
||||
},
|
||||
},
|
||||
history: map[string][]dockertypes.ImageHistory{
|
||||
"kubelet": {
|
||||
{
|
||||
ID: "1123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 200,
|
||||
},
|
||||
{
|
||||
ID: "<missing>",
|
||||
CreatedBy: "1baz",
|
||||
Size: 400,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedCalls: []string{"list_images", "image_history"},
|
||||
expectedLayers: map[string]*dockertypes.ImageHistory{
|
||||
"1123456": {
|
||||
ID: "1123456",
|
||||
CreatedBy: "foo",
|
||||
Size: 200,
|
||||
},
|
||||
"<missing>1baz": {
|
||||
ID: "<missing>",
|
||||
CreatedBy: "1baz",
|
||||
Size: 400,
|
||||
},
|
||||
},
|
||||
expectedImageToLayerIDs: map[string][]string{
|
||||
"kubelet": {"1123456", "<missing>1baz"},
|
||||
},
|
||||
expectedTotalStorageSize: 600,
|
||||
},
|
||||
} {
|
||||
fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2")
|
||||
fakeDockerClient.InjectImages(test.images)
|
||||
fakeDockerClient.InjectImageHistory(test.history)
|
||||
isp := newImageStatsProvider(fakeDockerClient)
|
||||
isp.layers = test.oldLayers
|
||||
isp.imageToLayerIDs = test.oldImageToLayerIDs
|
||||
st, err := isp.ImageStats()
|
||||
as := assert.New(t)
|
||||
as.NoError(err)
|
||||
as.NoError(fakeDockerClient.AssertCalls(test.expectedCalls))
|
||||
as.Equal(test.expectedLayers, isp.layers, "expected %+v, got %+v", test.expectedLayers, isp.layers)
|
||||
as.Equal(test.expectedImageToLayerIDs, isp.imageToLayerIDs, "expected %+v, got %+v", test.expectedImageToLayerIDs, isp.imageToLayerIDs)
|
||||
as.Equal(test.expectedTotalStorageSize, st.TotalStorageBytes, "expected %d, got %d", test.expectedTotalStorageSize, st.TotalStorageBytes)
|
||||
}
|
||||
}
|
242
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/instrumented_docker.go
generated
vendored
Normal file
@ -0,0 +1,242 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dockertools

import (
	"time"

	dockertypes "github.com/docker/engine-api/types"
	"k8s.io/kubernetes/pkg/kubelet/metrics"
)

// instrumentedDockerInterface wraps the DockerInterface and records the operations
// and errors metrics.
type instrumentedDockerInterface struct {
	client DockerInterface
}

// Creates an instrumented DockerInterface from an existing DockerInterface.
func NewInstrumentedDockerInterface(dockerClient DockerInterface) DockerInterface {
	return instrumentedDockerInterface{
		client: dockerClient,
	}
}

// recordOperation records the duration of the operation.
func recordOperation(operation string, start time.Time) {
	metrics.DockerOperations.WithLabelValues(operation).Inc()
	metrics.DockerOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start))
}

// recordError records error for metric if an error occurred.
func recordError(operation string, err error) {
	if err != nil {
		if _, ok := err.(operationTimeout); ok {
			metrics.DockerOperationsTimeout.WithLabelValues(operation).Inc()
		}
		// Docker operation timeout error is also a docker error, so we don't add else here.
		metrics.DockerOperationsErrors.WithLabelValues(operation).Inc()
	}
}

func (in instrumentedDockerInterface) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) {
	const operation = "list_containers"
	defer recordOperation(operation, time.Now())

	out, err := in.client.ListContainers(options)
	recordError(operation, err)
	return out, err
}

func (in instrumentedDockerInterface) InspectContainer(id string) (*dockertypes.ContainerJSON, error) {
	const operation = "inspect_container"
	defer recordOperation(operation, time.Now())

	out, err := in.client.InspectContainer(id)
	recordError(operation, err)
	return out, err
}

func (in instrumentedDockerInterface) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) {
	const operation = "create_container"
	defer recordOperation(operation, time.Now())

	out, err := in.client.CreateContainer(opts)
	recordError(operation, err)
	return out, err
}

func (in instrumentedDockerInterface) StartContainer(id string) error {
	const operation = "start_container"
	defer recordOperation(operation, time.Now())

	err := in.client.StartContainer(id)
	recordError(operation, err)
	return err
}

func (in instrumentedDockerInterface) StopContainer(id string, timeout int) error {
	const operation = "stop_container"
	defer recordOperation(operation, time.Now())

	err := in.client.StopContainer(id, timeout)
	recordError(operation, err)
	return err
}

func (in instrumentedDockerInterface) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error {
	const operation = "remove_container"
	defer recordOperation(operation, time.Now())

	err := in.client.RemoveContainer(id, opts)
	recordError(operation, err)
	return err
}

func (in instrumentedDockerInterface) InspectImageByRef(image string) (*dockertypes.ImageInspect, error) {
	const operation = "inspect_image"
	defer recordOperation(operation, time.Now())

	out, err := in.client.InspectImageByRef(image)
	recordError(operation, err)
	return out, err
}

func (in instrumentedDockerInterface) InspectImageByID(image string) (*dockertypes.ImageInspect, error) {
	const operation = "inspect_image"
	defer recordOperation(operation, time.Now())

	out, err := in.client.InspectImageByID(image)
	recordError(operation, err)
	return out, err
}

func (in instrumentedDockerInterface) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) {
	const operation = "list_images"
	defer recordOperation(operation, time.Now())

	out, err := in.client.ListImages(opts)
	recordError(operation, err)
	return out, err
}

func (in instrumentedDockerInterface) PullImage(imageID string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error {
	const operation = "pull_image"
	defer recordOperation(operation, time.Now())
	err := in.client.PullImage(imageID, auth, opts)
	recordError(operation, err)
	return err
}

func (in instrumentedDockerInterface) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) {
	const operation = "remove_image"
	defer recordOperation(operation, time.Now())

	imageDelete, err := in.client.RemoveImage(image, opts)
	recordError(operation, err)
	return imageDelete, err
}

func (in instrumentedDockerInterface) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error {
	const operation = "logs"
	defer recordOperation(operation, time.Now())

	err := in.client.Logs(id, opts, sopts)
	recordError(operation, err)
	return err
}

func (in instrumentedDockerInterface) Version() (*dockertypes.Version, error) {
	const operation = "version"
	defer recordOperation(operation, time.Now())

	out, err := in.client.Version()
	recordError(operation, err)
	return out, err
}

func (in instrumentedDockerInterface) Info() (*dockertypes.Info, error) {
	const operation = "info"
	defer recordOperation(operation, time.Now())

	out, err := in.client.Info()
	recordError(operation, err)
	return out, err
}

func (in instrumentedDockerInterface) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) {
	const operation = "create_exec"
	defer recordOperation(operation, time.Now())

	out, err := in.client.CreateExec(id, opts)
	recordError(operation, err)
	return out, err
}

func (in instrumentedDockerInterface) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error {
	const operation = "start_exec"
	defer recordOperation(operation, time.Now())

	err := in.client.StartExec(startExec, opts, sopts)
	recordError(operation, err)
	return err
}

func (in instrumentedDockerInterface) InspectExec(id string) (*dockertypes.ContainerExecInspect, error) {
	const operation = "inspect_exec"
	defer recordOperation(operation, time.Now())

	out, err := in.client.InspectExec(id)
	recordError(operation, err)
	return out, err
}

func (in instrumentedDockerInterface) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error {
	const operation = "attach"
	defer recordOperation(operation, time.Now())

	err := in.client.AttachToContainer(id, opts, sopts)
	recordError(operation, err)
	return err
}

func (in instrumentedDockerInterface) ImageHistory(id string) ([]dockertypes.ImageHistory, error) {
	const operation = "image_history"
	defer recordOperation(operation, time.Now())

	out, err := in.client.ImageHistory(id)
	recordError(operation, err)
	return out, err
}

func (in instrumentedDockerInterface) ResizeExecTTY(id string, height, width int) error {
	const operation = "resize_exec"
	defer recordOperation(operation, time.Now())

	err := in.client.ResizeExecTTY(id, height, width)
	recordError(operation, err)
	return err
}

func (in instrumentedDockerInterface) ResizeContainerTTY(id string, height, width int) error {
	const operation = "resize_container"
	defer recordOperation(operation, time.Now())

	err := in.client.ResizeContainerTTY(id, height, width)
	recordError(operation, err)
	return err
}
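// Illustrative sketch, not part of the vendored diff: the decorator above in use. Wrapping
// any DockerInterface with NewInstrumentedDockerInterface makes every call increment an
// operation counter, observe its latency and, on failure, bump the error (and possibly
// timeout) counters. NewFakeDockerClientWithVersion comes from fake_docker_client.go in
// this package (used the same way in images_test.go); the function name below is illustrative.
func exampleInstrumentedClientSketch() error {
	var base DockerInterface = NewFakeDockerClientWithVersion("1.2.3", "1.2")
	client := NewInstrumentedDockerInterface(base)
	// This call is recorded under the "list_images" operation label.
	_, err := client.ListImages(dockertypes.ImageListOptions{})
	return err
}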
635
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/kube_docker_client.go
generated
vendored
Normal file
|
@ -0,0 +1,635 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
dockermessage "github.com/docker/docker/pkg/jsonmessage"
|
||||
dockerstdcopy "github.com/docker/docker/pkg/stdcopy"
|
||||
dockerapi "github.com/docker/engine-api/client"
|
||||
dockertypes "github.com/docker/engine-api/types"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// kubeDockerClient is a wrapped layer of docker client for kubelet internal use. This layer is added to:
|
||||
// 1) Redirect stream for exec and attach operations.
|
||||
// 2) Wrap the context in this layer to make the DockerInterface cleaner.
|
||||
// 3) Stabilize the DockerInterface. The engine-api is still under active development, the interface
|
||||
// is not stabilized yet. However, the DockerInterface is used in many files in Kubernetes, we may
|
||||
// not want to change the interface frequently. With this layer, we can port the engine api to the
|
||||
// DockerInterface to avoid changing DockerInterface as much as possible.
|
||||
// (See
|
||||
// * https://github.com/docker/engine-api/issues/89
|
||||
// * https://github.com/docker/engine-api/issues/137
|
||||
// * https://github.com/docker/engine-api/pull/140)
|
||||
// TODO(random-liu): Swith to new docker interface by refactoring the functions in the old DockerInterface
|
||||
// one by one.
|
||||
type kubeDockerClient struct {
|
||||
// timeout is the timeout of short running docker operations.
|
||||
timeout time.Duration
|
||||
// If no pulling progress is made before imagePullProgressDeadline, the image pulling will be cancelled.
|
||||
// Docker reports image progress for every 512kB block, so normally there shouldn't be too long interval
|
||||
// between progress updates.
|
||||
imagePullProgressDeadline time.Duration
|
||||
client *dockerapi.Client
|
||||
}
|
||||
|
||||
// Make sure that kubeDockerClient implemented the DockerInterface.
|
||||
var _ DockerInterface = &kubeDockerClient{}
|
||||
|
||||
// There are 2 kinds of docker operations categorized by running time:
|
||||
// * Long running operation: The long running operation could run for arbitrary long time, and the running time
|
||||
// usually depends on some uncontrollable factors. These operations include: PullImage, Logs, StartExec, AttachToContainer.
|
||||
// * Non-long running operation: Given the maximum load of the system, the non-long running operation should finish
|
||||
// in expected and usually short time. These include all other operations.
|
||||
// kubeDockerClient only applies timeout on non-long running operations.
|
||||
const (
|
||||
// defaultTimeout is the default timeout of short running docker operations.
|
||||
defaultTimeout = 2 * time.Minute
|
||||
|
||||
// defaultShmSize is the default ShmSize to use (in bytes) if not specified.
|
||||
defaultShmSize = int64(1024 * 1024 * 64)
|
||||
|
||||
// defaultImagePullingProgressReportInterval is the default interval of image pulling progress reporting.
|
||||
defaultImagePullingProgressReportInterval = 10 * time.Second
|
||||
)
|
||||
|
||||
// newKubeDockerClient creates an kubeDockerClient from an existing docker client. If requestTimeout is 0,
|
||||
// defaultTimeout will be applied.
|
||||
func newKubeDockerClient(dockerClient *dockerapi.Client, requestTimeout, imagePullProgressDeadline time.Duration) DockerInterface {
|
||||
if requestTimeout == 0 {
|
||||
requestTimeout = defaultTimeout
|
||||
}
|
||||
|
||||
k := &kubeDockerClient{
|
||||
client: dockerClient,
|
||||
timeout: requestTimeout,
|
||||
imagePullProgressDeadline: imagePullProgressDeadline,
|
||||
}
|
||||
// Notice that this assumes that docker is running before kubelet is started.
|
||||
v, err := k.Version()
|
||||
if err != nil {
|
||||
glog.Errorf("failed to retrieve docker version: %v", err)
|
||||
glog.Warningf("Using empty version for docker client, this may sometimes cause compatibility issue.")
|
||||
} else {
|
||||
// Update client version with real api version.
|
||||
dockerClient.UpdateClientVersion(v.APIVersion)
|
||||
}
|
||||
return k
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
containers, err := d.client.ContainerList(ctx, options)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return containers, nil
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJSON, error) {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
containerJSON, err := d.client.ContainerInspect(ctx, id)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
if dockerapi.IsErrContainerNotFound(err) {
|
||||
return nil, containerNotFoundError{ID: id}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return &containerJSON, nil
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
// we provide an explicit default shm size as to not depend on docker daemon.
|
||||
// TODO: evaluate exposing this as a knob in the API
|
||||
if opts.HostConfig != nil && opts.HostConfig.ShmSize <= 0 {
|
||||
opts.HostConfig.ShmSize = defaultShmSize
|
||||
}
|
||||
createResp, err := d.client.ContainerCreate(ctx, opts.Config, opts.HostConfig, opts.NetworkingConfig, opts.Name)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &createResp, nil
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) StartContainer(id string) error {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
err := d.client.ContainerStart(ctx, id)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return ctxErr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Stopping an already stopped container will not cause an error in engine-v1.
|
||||
func (d *kubeDockerClient) StopContainer(id string, timeout int) error {
|
||||
ctx, cancel := d.getCustomTimeoutContext(time.Duration(timeout) * time.Second)
|
||||
defer cancel()
|
||||
err := d.client.ContainerStop(ctx, id, timeout)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return ctxErr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
err := d.client.ContainerRemove(ctx, id, opts)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return ctxErr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) inspectImageRaw(ref string) (*dockertypes.ImageInspect, error) {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
resp, _, err := d.client.ImageInspectWithRaw(ctx, ref, true)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
if dockerapi.IsErrImageNotFound(err) {
|
||||
err = imageNotFoundError{ID: ref}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) InspectImageByID(imageID string) (*dockertypes.ImageInspect, error) {
|
||||
resp, err := d.inspectImageRaw(imageID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !matchImageIDOnly(*resp, imageID) {
|
||||
return nil, imageNotFoundError{ID: imageID}
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) InspectImageByRef(imageRef string) (*dockertypes.ImageInspect, error) {
|
||||
resp, err := d.inspectImageRaw(imageRef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !matchImageTagOrSHA(*resp, imageRef) {
|
||||
return nil, imageNotFoundError{ID: imageRef}
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) ImageHistory(id string) ([]dockertypes.ImageHistory, error) {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
resp, err := d.client.ImageHistory(ctx, id)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
images, err := d.client.ImageList(ctx, opts)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return images, nil
|
||||
}
|
||||
|
||||
func base64EncodeAuth(auth dockertypes.AuthConfig) (string, error) {
|
||||
var buf bytes.Buffer
|
||||
if err := json.NewEncoder(&buf).Encode(auth); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.URLEncoding.EncodeToString(buf.Bytes()), nil
|
||||
}
|
||||
|
||||
// progress is a wrapper of dockermessage.JSONMessage with a lock protecting it.
|
||||
type progress struct {
|
||||
sync.RWMutex
|
||||
// message stores the latest docker json message.
|
||||
message *dockermessage.JSONMessage
|
||||
// timestamp of the latest update.
|
||||
timestamp time.Time
|
||||
}
|
||||
|
||||
func newProgress() *progress {
|
||||
return &progress{timestamp: time.Now()}
|
||||
}
|
||||
|
||||
func (p *progress) set(msg *dockermessage.JSONMessage) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
p.message = msg
|
||||
p.timestamp = time.Now()
|
||||
}
|
||||
|
||||
func (p *progress) get() (string, time.Time) {
|
||||
p.RLock()
|
||||
defer p.RUnlock()
|
||||
if p.message == nil {
|
||||
return "No progress", p.timestamp
|
||||
}
|
||||
// The following code is based on JSONMessage.Display
|
||||
var prefix string
|
||||
if p.message.ID != "" {
|
||||
prefix = fmt.Sprintf("%s: ", p.message.ID)
|
||||
}
|
||||
if p.message.Progress == nil {
|
||||
return fmt.Sprintf("%s%s", prefix, p.message.Status), p.timestamp
|
||||
}
|
||||
return fmt.Sprintf("%s%s %s", prefix, p.message.Status, p.message.Progress.String()), p.timestamp
|
||||
}
|
||||
|
||||
// progressReporter keeps the newest image pulling progress and periodically report the newest progress.
|
||||
type progressReporter struct {
|
||||
*progress
|
||||
image string
|
||||
cancel context.CancelFunc
|
||||
stopCh chan struct{}
|
||||
imagePullProgressDeadline time.Duration
|
||||
}
|
||||
|
||||
// newProgressReporter creates a new progressReporter for specific image with specified reporting interval
|
||||
func newProgressReporter(image string, cancel context.CancelFunc, imagePullProgressDeadline time.Duration) *progressReporter {
|
||||
return &progressReporter{
|
||||
progress: newProgress(),
|
||||
image: image,
|
||||
cancel: cancel,
|
||||
stopCh: make(chan struct{}),
|
||||
imagePullProgressDeadline: imagePullProgressDeadline,
|
||||
}
|
||||
}
|
||||
|
||||
// start starts the progressReporter
|
||||
func (p *progressReporter) start() {
|
||||
go func() {
|
||||
ticker := time.NewTicker(defaultImagePullingProgressReportInterval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
// TODO(random-liu): Report as events.
|
||||
select {
|
||||
case <-ticker.C:
|
||||
progress, timestamp := p.progress.get()
|
||||
// If there is no progress for p.imagePullProgressDeadline, cancel the operation.
|
||||
if time.Now().Sub(timestamp) > p.imagePullProgressDeadline {
|
||||
glog.Errorf("Cancel pulling image %q because of no progress for %v, latest progress: %q", p.image, p.imagePullProgressDeadline, progress)
|
||||
p.cancel()
|
||||
return
|
||||
}
|
||||
glog.V(2).Infof("Pulling image %q: %q", p.image, progress)
|
||||
case <-p.stopCh:
|
||||
progress, _ := p.progress.get()
|
||||
glog.V(2).Infof("Stop pulling image %q: %q", p.image, progress)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// stop stops the progressReporter
|
||||
func (p *progressReporter) stop() {
|
||||
close(p.stopCh)
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error {
|
||||
// RegistryAuth is the base64 encoded credentials for the registry
|
||||
base64Auth, err := base64EncodeAuth(auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts.RegistryAuth = base64Auth
|
||||
ctx, cancel := d.getCancelableContext()
|
||||
defer cancel()
|
||||
resp, err := d.client.ImagePull(ctx, image, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Close()
|
||||
reporter := newProgressReporter(image, cancel, d.imagePullProgressDeadline)
|
||||
reporter.start()
|
||||
defer reporter.stop()
|
||||
decoder := json.NewDecoder(resp)
|
||||
for {
|
||||
var msg dockermessage.JSONMessage
|
||||
err := decoder.Decode(&msg)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if msg.Error != nil {
|
||||
return msg.Error
|
||||
}
|
||||
reporter.set(&msg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
resp, err := d.client.ImageRemove(ctx, image, opts)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error {
|
||||
ctx, cancel := d.getCancelableContext()
|
||||
defer cancel()
|
||||
resp, err := d.client.ContainerLogs(ctx, id, opts)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Close()
|
||||
return d.redirectResponseToOutputStream(sopts.RawTerminal, sopts.OutputStream, sopts.ErrorStream, resp)
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) Version() (*dockertypes.Version, error) {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
resp, err := d.client.ServerVersion(ctx)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) Info() (*dockertypes.Info, error) {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
resp, err := d.client.Info(ctx)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// TODO(random-liu): Add unit test for exec and attach functions, just like what go-dockerclient did.
|
||||
func (d *kubeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
resp, err := d.client.ContainerExecCreate(ctx, id, opts)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error {
|
||||
ctx, cancel := d.getCancelableContext()
|
||||
defer cancel()
|
||||
if opts.Detach {
|
||||
err := d.client.ContainerExecStart(ctx, startExec, opts)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return ctxErr
|
||||
}
|
||||
return err
|
||||
}
|
||||
resp, err := d.client.ContainerExecAttach(ctx, startExec, dockertypes.ExecConfig{
|
||||
Detach: opts.Detach,
|
||||
Tty: opts.Tty,
|
||||
})
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Close()
|
||||
return d.holdHijackedConnection(sopts.RawTerminal || opts.Tty, sopts.InputStream, sopts.OutputStream, sopts.ErrorStream, resp)
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecInspect, error) {
|
||||
ctx, cancel := d.getTimeoutContext()
|
||||
defer cancel()
|
||||
resp, err := d.client.ContainerExecInspect(ctx, id)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error {
|
||||
ctx, cancel := d.getCancelableContext()
|
||||
defer cancel()
|
||||
resp, err := d.client.ContainerAttach(ctx, id, opts)
|
||||
if ctxErr := contextError(ctx); ctxErr != nil {
|
||||
return ctxErr
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Close()
|
||||
return d.holdHijackedConnection(sopts.RawTerminal, sopts.InputStream, sopts.OutputStream, sopts.ErrorStream, resp)
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) ResizeExecTTY(id string, height, width int) error {
|
||||
ctx, cancel := d.getCancelableContext()
|
||||
defer cancel()
|
||||
return d.client.ContainerExecResize(ctx, id, dockertypes.ResizeOptions{
|
||||
Height: height,
|
||||
Width: width,
|
||||
})
|
||||
}
|
||||
|
||||
func (d *kubeDockerClient) ResizeContainerTTY(id string, height, width int) error {
|
||||
ctx, cancel := d.getCancelableContext()
|
||||
defer cancel()
|
||||
return d.client.ContainerResize(ctx, id, dockertypes.ResizeOptions{
|
||||
Height: height,
|
||||
Width: width,
|
||||
})
|
||||
}
|
||||
|
||||
// redirectResponseToOutputStream redirect the response stream to stdout and stderr. When tty is true, all stream will
|
||||
// only be redirected to stdout.
|
||||
func (d *kubeDockerClient) redirectResponseToOutputStream(tty bool, outputStream, errorStream io.Writer, resp io.Reader) error {
|
||||
if outputStream == nil {
|
||||
outputStream = ioutil.Discard
|
||||
}
|
||||
if errorStream == nil {
|
||||
errorStream = ioutil.Discard
|
||||
}
|
||||
var err error
|
||||
if tty {
|
||||
_, err = io.Copy(outputStream, resp)
|
||||
} else {
|
||||
_, err = dockerstdcopy.StdCopy(outputStream, errorStream, resp)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// holdHijackedConnection hold the HijackedResponse, redirect the inputStream to the connection, and redirect the response
|
||||
// stream to stdout and stderr. NOTE: If needed, we could also add context in this function.
|
||||
func (d *kubeDockerClient) holdHijackedConnection(tty bool, inputStream io.Reader, outputStream, errorStream io.Writer, resp dockertypes.HijackedResponse) error {
|
||||
receiveStdout := make(chan error)
|
||||
if outputStream != nil || errorStream != nil {
|
||||
go func() {
|
||||
receiveStdout <- d.redirectResponseToOutputStream(tty, outputStream, errorStream, resp.Reader)
|
||||
}()
|
||||
}
|
||||
|
||||
stdinDone := make(chan struct{})
|
||||
go func() {
|
||||
if inputStream != nil {
|
||||
io.Copy(resp.Conn, inputStream)
|
||||
}
|
||||
resp.CloseWrite()
|
||||
close(stdinDone)
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-receiveStdout:
|
||||
return err
|
||||
case <-stdinDone:
|
||||
if outputStream != nil || errorStream != nil {
|
||||
return <-receiveStdout
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getCancelableContext returns a new cancelable context. For long running requests without timeout, we use cancelable
|
||||
// context to avoid potential resource leak, although the current implementation shouldn't leak resource.
|
||||
func (d *kubeDockerClient) getCancelableContext() (context.Context, context.CancelFunc) {
|
||||
return context.WithCancel(context.Background())
|
||||
}
|
||||
|
||||
// getTimeoutContext returns a new context with default request timeout
|
||||
func (d *kubeDockerClient) getTimeoutContext() (context.Context, context.CancelFunc) {
|
||||
return context.WithTimeout(context.Background(), d.timeout)
|
||||
}
|
||||
|
||||
// getCustomTimeoutContext returns a new context with a specific request timeout
|
||||
func (d *kubeDockerClient) getCustomTimeoutContext(timeout time.Duration) (context.Context, context.CancelFunc) {
|
||||
// Pick the larger of the two
|
||||
if d.timeout > timeout {
|
||||
timeout = d.timeout
|
||||
}
|
||||
return context.WithTimeout(context.Background(), timeout)
|
||||
}
|
||||
|
||||
// ParseDockerTimestamp parses the timestamp returned by DockerInterface from string to time.Time
|
||||
func ParseDockerTimestamp(s string) (time.Time, error) {
|
||||
// Timestamp returned by Docker is in time.RFC3339Nano format.
|
||||
return time.Parse(time.RFC3339Nano, s)
|
||||
}
|
||||
|
||||
// contextError checks the context, and returns error if the context is timeout.
|
||||
func contextError(ctx context.Context) error {
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
return operationTimeout{err: ctx.Err()}
|
||||
}
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
// StreamOptions are the options used to configure the stream redirection
|
||||
type StreamOptions struct {
|
||||
RawTerminal bool
|
||||
InputStream io.Reader
|
||||
OutputStream io.Writer
|
||||
ErrorStream io.Writer
|
||||
}
|
||||
|
||||
// operationTimeout is the error returned when the docker operations are timeout.
|
||||
type operationTimeout struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e operationTimeout) Error() string {
|
||||
return fmt.Sprintf("operation timeout: %v", e.err)
|
||||
}
|
||||
|
||||
// containerNotFoundError is the error returned by InspectContainer when container not found. We
|
||||
// add this error type for testability. We don't use the original error returned by engine-api
|
||||
// because dockertypes.containerNotFoundError is private, we can't create and inject it in our test.
|
||||
type containerNotFoundError struct {
|
||||
ID string
|
||||
}
|
||||
|
||||
func (e containerNotFoundError) Error() string {
|
||||
return fmt.Sprintf("no such container: %q", e.ID)
|
||||
}
|
||||
|
||||
// imageNotFoundError is the error returned by InspectImage when image not found.
|
||||
type imageNotFoundError struct {
|
||||
ID string
|
||||
}
|
||||
|
||||
func (e imageNotFoundError) Error() string {
|
||||
return fmt.Sprintf("no such image: %q", e.ID)
|
||||
}
|
||||
|
||||
// IsImageNotFoundError checks whether the error is image not found error. This is exposed
|
||||
// to share with dockershim.
|
||||
func IsImageNotFoundError(err error) bool {
|
||||
_, ok := err.(imageNotFoundError)
|
||||
return ok
|
||||
}
|
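// Illustrative sketch, not part of the vendored diff: the timeout pattern kube_docker_client.go
// applies to every non-long-running call, shown for a single operation. The function name is
// illustrative; getTimeoutContext, contextError and operationTimeout are defined above. A
// deadline expiry surfaces as operationTimeout, which recordError in instrumented_docker.go
// distinguishes from ordinary docker errors.
func exampleTimeoutWrappedCallSketch(d *kubeDockerClient) (*dockertypes.Info, error) {
	ctx, cancel := d.getTimeoutContext() // context.WithTimeout(context.Background(), d.timeout)
	defer cancel()
	resp, err := d.client.Info(ctx)
	if ctxErr := contextError(ctx); ctxErr != nil {
		return nil, ctxErr // operationTimeout when the deadline was exceeded
	}
	if err != nil {
		return nil, err
	}
	return &resp, nil
}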
245
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/labels.go
generated
vendored
Normal file
|
@ -0,0 +1,245 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockertools
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubetypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/custommetrics"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
)
|
||||
|
||||
// This file contains all docker label related constants and functions, including:
|
||||
// * label setters and getters
|
||||
// * label filters (maybe in the future)
|
||||
|
||||
const (
|
||||
kubernetesPodDeletionGracePeriodLabel = "io.kubernetes.pod.deletionGracePeriod"
|
||||
kubernetesPodTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
|
||||
|
||||
kubernetesContainerHashLabel = "io.kubernetes.container.hash"
|
||||
kubernetesContainerRestartCountLabel = "io.kubernetes.container.restartCount"
|
||||
kubernetesContainerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath"
|
||||
kubernetesContainerPreStopHandlerLabel = "io.kubernetes.container.preStopHandler"
|
||||
kubernetesContainerPortsLabel = "io.kubernetes.container.ports" // Added in 1.4
|
||||
|
||||
// TODO(random-liu): Keep this for old containers, remove this when we drop support for v1.1.
|
||||
kubernetesPodLabel = "io.kubernetes.pod.data"
|
||||
|
||||
cadvisorPrometheusMetricsLabel = "io.cadvisor.metric.prometheus"
|
||||
)
|
||||
|
||||
// Container information which has been labelled on each docker container
|
||||
// TODO(random-liu): The type of Hash should be compliance with kubelet container status.
|
||||
type labelledContainerInfo struct {
|
||||
PodName string
|
||||
PodNamespace string
|
||||
PodUID kubetypes.UID
|
||||
PodDeletionGracePeriod *int64
|
||||
PodTerminationGracePeriod *int64
|
||||
Name string
|
||||
Hash string
|
||||
RestartCount int
|
||||
TerminationMessagePath string
|
||||
PreStopHandler *v1.Handler
|
||||
Ports []v1.ContainerPort
|
||||
}
|
||||
|
||||
func newLabels(container *v1.Container, pod *v1.Pod, restartCount int, enableCustomMetrics bool) map[string]string {
|
||||
labels := map[string]string{}
|
||||
labels[types.KubernetesPodNameLabel] = pod.Name
|
||||
labels[types.KubernetesPodNamespaceLabel] = pod.Namespace
|
||||
labels[types.KubernetesPodUIDLabel] = string(pod.UID)
|
||||
if pod.DeletionGracePeriodSeconds != nil {
|
||||
labels[kubernetesPodDeletionGracePeriodLabel] = strconv.FormatInt(*pod.DeletionGracePeriodSeconds, 10)
|
||||
}
|
||||
if pod.Spec.TerminationGracePeriodSeconds != nil {
|
||||
labels[kubernetesPodTerminationGracePeriodLabel] = strconv.FormatInt(*pod.Spec.TerminationGracePeriodSeconds, 10)
|
||||
}
|
||||
|
||||
labels[types.KubernetesContainerNameLabel] = container.Name
|
||||
labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
|
||||
labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount)
|
||||
labels[kubernetesContainerTerminationMessagePathLabel] = container.TerminationMessagePath
|
||||
if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
|
||||
// Using json enconding so that the PreStop handler object is readable after writing as a label
|
||||
rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
|
||||
if err != nil {
|
||||
glog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err)
|
||||
} else {
|
||||
labels[kubernetesContainerPreStopHandlerLabel] = string(rawPreStop)
|
||||
}
|
||||
}
|
||||
if len(container.Ports) > 0 {
|
||||
rawContainerPorts, err := json.Marshal(container.Ports)
|
||||
if err != nil {
|
||||
glog.Errorf("Unable to marshal container ports for container %q for pod %q: %v", container.Name, format.Pod(pod), err)
|
||||
} else {
|
||||
labels[kubernetesContainerPortsLabel] = string(rawContainerPorts)
|
||||
}
|
||||
}
|
||||
if enableCustomMetrics {
|
||||
path, err := custommetrics.GetCAdvisorCustomMetricsDefinitionPath(container)
|
||||
if path != nil && err == nil {
|
||||
labels[cadvisorPrometheusMetricsLabel] = *path
|
||||
}
|
||||
}
|
||||
|
||||
return labels
|
||||
}
|
||||
|
||||
func getContainerInfoFromLabel(labels map[string]string) *labelledContainerInfo {
|
||||
var err error
|
||||
containerInfo := &labelledContainerInfo{
|
||||
PodName: getStringValueFromLabel(labels, types.KubernetesPodNameLabel),
|
||||
PodNamespace: getStringValueFromLabel(labels, types.KubernetesPodNamespaceLabel),
|
||||
PodUID: kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)),
|
||||
Name: getStringValueFromLabel(labels, types.KubernetesContainerNameLabel),
|
||||
Hash: getStringValueFromLabel(labels, kubernetesContainerHashLabel),
|
||||
TerminationMessagePath: getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePathLabel),
|
||||
}
|
||||
if containerInfo.RestartCount, err = getIntValueFromLabel(labels, kubernetesContainerRestartCountLabel); err != nil {
|
||||
logError(containerInfo, kubernetesContainerRestartCountLabel, err)
|
||||
}
|
||||
if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodDeletionGracePeriodLabel); err != nil {
|
||||
logError(containerInfo, kubernetesPodDeletionGracePeriodLabel, err)
|
||||
}
|
||||
if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodTerminationGracePeriodLabel); err != nil {
|
||||
logError(containerInfo, kubernetesPodTerminationGracePeriodLabel, err)
|
||||
}
|
||||
preStopHandler := &v1.Handler{}
|
||||
if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPreStopHandlerLabel, preStopHandler); err != nil {
|
||||
logError(containerInfo, kubernetesContainerPreStopHandlerLabel, err)
|
||||
} else if found {
|
||||
containerInfo.PreStopHandler = preStopHandler
|
||||
}
|
||||
containerPorts := []v1.ContainerPort{}
|
||||
if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPortsLabel, &containerPorts); err != nil {
|
||||
logError(containerInfo, kubernetesContainerPortsLabel, err)
|
||||
} else if found {
|
||||
containerInfo.Ports = containerPorts
|
||||
}
|
||||
supplyContainerInfoWithOldLabel(labels, containerInfo)
|
||||
return containerInfo
|
||||
}
|
||||
|
||||
func getStringValueFromLabel(labels map[string]string, label string) string {
|
||||
if value, found := labels[label]; found {
|
||||
return value
|
||||
}
|
||||
// Do not report error, because there should be many old containers without label now.
|
||||
glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
|
||||
// Return empty string "" for these containers, the caller will get value by other ways.
|
||||
return ""
|
||||
}
|
||||
|
||||
func getIntValueFromLabel(labels map[string]string, label string) (int, error) {
|
||||
if strValue, found := labels[label]; found {
|
||||
intValue, err := strconv.Atoi(strValue)
|
||||
if err != nil {
|
||||
// This really should not happen. Just set value to 0 to handle this abnormal case
|
||||
return 0, err
|
||||
}
|
||||
return intValue, nil
|
||||
}
|
||||
// Do not report error, because there should be many old containers without label now.
|
||||
glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
|
||||
// Just set the value to 0
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func getInt64PointerFromLabel(labels map[string]string, label string) (*int64, error) {
|
||||
if strValue, found := labels[label]; found {
|
||||
int64Value, err := strconv.ParseInt(strValue, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &int64Value, nil
|
||||
}
|
||||
// Because it's normal that a container has no PodDeletionGracePeriod and PodTerminationGracePeriod label,
|
||||
// don't report any error here.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// getJsonObjectFromLabel returns a bool value indicating whether an object is found
|
||||
func getJsonObjectFromLabel(labels map[string]string, label string, value interface{}) (bool, error) {
|
||||
if strValue, found := labels[label]; found {
|
||||
err := json.Unmarshal([]byte(strValue), value)
|
||||
return found, err
|
||||
}
|
||||
// Because it's normal that a container has no PreStopHandler label, don't report any error here.
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// The label kubernetesPodLabel was added a long time ago (#7421); it serialized the whole v1.Pod into a docker label.
// We want to remove this label because it serializes too much useless information. However, the kubelet may still have
// to work with old containers that only carry this label until the old label is completely deprecated.
// Until then, to ensure correctness, we have to supply the information from the old label when the newly added labels
// are not available.
// TODO(random-liu): Remove this function when we can completely remove label kubernetesPodLabel, probably after
// dropping support for v1.1.
func supplyContainerInfoWithOldLabel(labels map[string]string, containerInfo *labelledContainerInfo) {
	// Get the v1.Pod from the old label.
	var pod *v1.Pod
	data, found := labels[kubernetesPodLabel]
	if !found {
		// Don't report an error here; it is normal for a container to have no pod label, especially
		// as we gradually deprecate the old label.
		return
	}
	pod = &v1.Pod{}
	if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(data), pod); err != nil {
		// If the pod label can't be parsed, we should report an error.
		logError(containerInfo, kubernetesPodLabel, err)
		return
	}
	if containerInfo.PodDeletionGracePeriod == nil {
		containerInfo.PodDeletionGracePeriod = pod.DeletionGracePeriodSeconds
	}
	if containerInfo.PodTerminationGracePeriod == nil {
		containerInfo.PodTerminationGracePeriod = pod.Spec.TerminationGracePeriodSeconds
	}

	// Get the v1.Container from the v1.Pod.
	var container *v1.Container
	for i := range pod.Spec.Containers {
		if pod.Spec.Containers[i].Name == containerInfo.Name {
			container = &pod.Spec.Containers[i]
			break
		}
	}
	if container == nil {
		glog.Errorf("Unable to find container %q in pod %q", containerInfo.Name, format.Pod(pod))
		return
	}
	if containerInfo.PreStopHandler == nil && container.Lifecycle != nil {
		containerInfo.PreStopHandler = container.Lifecycle.PreStop
	}
}

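// NOTE (editor's illustrative sketch, not part of the upstream file): the fallback above can be
// summarized as "decode the legacy kubernetesPodLabel only when the newer labels are missing".
// A minimal version of just the decode step might look like this.
func exampleDecodeLegacyPodLabel(labels map[string]string) (*v1.Pod, error) {
	data, found := labels[kubernetesPodLabel]
	if !found {
		// Normal for containers created without the old label; nothing to supply.
		return nil, nil
	}
	pod := &v1.Pod{}
	if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(data), pod); err != nil {
		return nil, err
	}
	return pod, nil
}
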
func logError(containerInfo *labelledContainerInfo, label string, err error) {
	glog.Errorf("Unable to get %q for container %q of pod %q: %v", label, containerInfo.Name,
		kubecontainer.BuildPodFullName(containerInfo.PodName, containerInfo.PodNamespace), err)
}
141
vendor/k8s.io/kubernetes/pkg/kubelet/dockertools/labels_test.go
generated
vendored
Normal file
@ -0,0 +1,141 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dockertools

import (
	"reflect"
	"strconv"
	"testing"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/api/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func TestLabels(t *testing.T) {
	restartCount := 5
	deletionGracePeriod := int64(10)
	terminationGracePeriod := int64(10)
	lifecycle := &v1.Lifecycle{
		// PostStart is intentionally left nil.
		PreStop: &v1.Handler{
			Exec: &v1.ExecAction{
				Command: []string{"action1", "action2"},
			},
			HTTPGet: &v1.HTTPGetAction{
				Path:   "path",
				Host:   "host",
				Port:   intstr.FromInt(8080),
				Scheme: "scheme",
			},
			TCPSocket: &v1.TCPSocketAction{
				Port: intstr.FromString("80"),
			},
		},
	}
	containerPorts := []v1.ContainerPort{
		{
			Name:          "http",
			HostPort:      80,
			ContainerPort: 8080,
			Protocol:      v1.ProtocolTCP,
		},
		{
			Name:          "https",
			HostPort:      443,
			ContainerPort: 6443,
			Protocol:      v1.ProtocolTCP,
		},
	}
	container := &v1.Container{
		Name:                   "test_container",
		Ports:                  containerPorts,
		TerminationMessagePath: "/somepath",
		Lifecycle:              lifecycle,
	}
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name:                       "test_pod",
			Namespace:                  "test_pod_namespace",
			UID:                        "test_pod_uid",
			DeletionGracePeriodSeconds: &deletionGracePeriod,
		},
		Spec: v1.PodSpec{
			Containers:                    []v1.Container{*container},
			TerminationGracePeriodSeconds: &terminationGracePeriod,
		},
	}
	expected := &labelledContainerInfo{
		PodName:                   pod.Name,
		PodNamespace:              pod.Namespace,
		PodUID:                    pod.UID,
		PodDeletionGracePeriod:    pod.DeletionGracePeriodSeconds,
		PodTerminationGracePeriod: pod.Spec.TerminationGracePeriodSeconds,
		Name:                      container.Name,
		Hash:                      strconv.FormatUint(kubecontainer.HashContainer(container), 16),
		RestartCount:              restartCount,
		TerminationMessagePath:    container.TerminationMessagePath,
		PreStopHandler:            container.Lifecycle.PreStop,
		Ports:                     containerPorts,
	}

	// Test whether we can get the right information back from the labels.
	labels := newLabels(container, pod, restartCount, false)
	containerInfo := getContainerInfoFromLabel(labels)
	if !reflect.DeepEqual(containerInfo, expected) {
		t.Errorf("expected %v, got %v", expected, containerInfo)
	}

	// Test that when DeletionGracePeriodSeconds, TerminationGracePeriodSeconds and Lifecycle are nil,
	// the corresponding information retrieved from the labels is also nil.
	container.Lifecycle = nil
	pod.DeletionGracePeriodSeconds = nil
	pod.Spec.TerminationGracePeriodSeconds = nil
	expected.PodDeletionGracePeriod = nil
	expected.PodTerminationGracePeriod = nil
	expected.PreStopHandler = nil
	// Because the container has changed, the hash should be updated.
	expected.Hash = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
	labels = newLabels(container, pod, restartCount, false)
	containerInfo = getContainerInfoFromLabel(labels)
	if !reflect.DeepEqual(containerInfo, expected) {
		t.Errorf("expected %v, got %v", expected, containerInfo)
	}

	// Test that when DeletionGracePeriodSeconds, TerminationGracePeriodSeconds and Lifecycle are nil
	// but the old label kubernetesPodLabel is set, the information is still recovered from the old label.
	pod.DeletionGracePeriodSeconds = &deletionGracePeriod
	pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriod
	container.Lifecycle = lifecycle
	data, err := runtime.Encode(testapi.Default.Codec(), pod)
	if err != nil {
		t.Fatalf("Failed to encode pod %q into string: %v", format.Pod(pod), err)
	}
	labels[kubernetesPodLabel] = string(data)
	expected.PodDeletionGracePeriod = pod.DeletionGracePeriodSeconds
	expected.PodTerminationGracePeriod = pod.Spec.TerminationGracePeriodSeconds
	expected.PreStopHandler = container.Lifecycle.PreStop
	// Do not update expected.Hash here: the labels from the previous step are reused, so
	// kubernetesContainerHashLabel never changes and expected.Hash should stay the same.
	containerInfo = getContainerInfoFromLabel(labels)
	if !reflect.DeepEqual(containerInfo, expected) {
		t.Errorf("expected %v, got %v", expected, containerInfo)
	}
}