Vendor: Update k8s version

Signed-off-by: Michał Żyłowski <michal.zylowski@intel.com>
Michał Żyłowski 2017-02-03 14:41:32 +01:00
parent dfa93414c5
commit 52baf68d50
3756 changed files with 113013 additions and 92675 deletions

View file

@ -47,15 +47,15 @@ go_library(
"//pkg/kubelet/util/format:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/oom:go_default_library",
"//pkg/util/procfs:go_default_library",
"//pkg/util/selinux:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/util/tail:go_default_library",
"//pkg/util/term:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor:github.com/armon/circbuf",
"//vendor:github.com/docker/distribution/digest",
"//vendor:github.com/docker/distribution/reference",
"//vendor:github.com/docker/docker/pkg/jsonmessage",
@ -76,6 +76,8 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/util/errors",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/client-go/util/clock",
"//vendor:k8s.io/client-go/util/flowcontrol",
],
)
@ -116,9 +118,7 @@ go_test(
"//pkg/kubelet/types:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/hash:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/strings:go_default_library",
@ -130,9 +130,12 @@ go_test(
"//vendor:github.com/golang/mock/gomock",
"//vendor:github.com/google/cadvisor/info/v1",
"//vendor:github.com/stretchr/testify/assert",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/client-go/util/clock",
"//vendor:k8s.io/client-go/util/flowcontrol",
],
)
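The dependency moves in this BUILD file (pkg/util/clock and pkg/util/flowcontrol replaced by the vendored client-go packages) correspond to import-path changes in the Go sources that follow. A minimal sketch of the relocated packages in use, assuming the vendored client-go tree; the helper name is hypothetical:

package dockertools

import (
    "time"

    // Relocated in this update; previously k8s.io/kubernetes/pkg/util/{clock,flowcontrol}.
    "k8s.io/client-go/util/clock"
    "k8s.io/client-go/util/flowcontrol"
)

// newPullBackOff is a hypothetical helper: a real clock paired with an
// image-pull backoff, built from the newly vendored packages.
func newPullBackOff() (clock.Clock, *flowcontrol.Backoff) {
    return clock.RealClock{}, flowcontrol.NewBackOff(10*time.Second, 5*time.Minute)
}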

View file

@ -24,13 +24,14 @@ import (
"time"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
func newTestContainerGC(t *testing.T) (*containerGC, *FakeDockerClient) {
fakeDocker := new(FakeDockerClient)
fakeDocker := NewFakeDockerClient()
fakePodGetter := newFakePodGetter()
gc := NewContainerGC(fakeDocker, fakePodGetter, "")
return gc, fakeDocker
@ -66,7 +67,7 @@ func addPods(podGetter podGetter, podUIDs ...types.UID) {
fakePodGetter := podGetter.(*fakePodGetter)
for _, uid := range podUIDs {
fakePodGetter.pods[uid] = &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uid),
Namespace: "test",
UID: uid,

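The ObjectMeta change above repeats throughout the test files in this commit: pod metadata now uses the apimachinery metav1 type, while the Pod type itself still comes from pkg/api/v1. A minimal sketch of the migrated fixture style; the helper name is hypothetical:

package dockertools

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/api/v1"
)

// testPod builds a fixture the way the updated tests do: metav1.ObjectMeta
// inside a pkg/api/v1 Pod.
func testPod(uid types.UID) *v1.Pod {
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "pod" + string(uid),
            Namespace: "test",
            UID:       uid,
        },
    }
}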
View file

@ -33,6 +33,7 @@ import (
"sync"
"time"
"github.com/armon/circbuf"
dockertypes "github.com/docker/engine-api/types"
dockercontainer "github.com/docker/engine-api/types/container"
dockerstrslice "github.com/docker/engine-api/types/strslice"
@ -47,6 +48,7 @@ import (
kubetypes "k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
@ -65,11 +67,11 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"
"k8s.io/kubernetes/pkg/util/selinux"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/util/tail"
"k8s.io/kubernetes/pkg/util/term"
utilversion "k8s.io/kubernetes/pkg/util/version"
)
@ -139,9 +141,6 @@ type DockerManager struct {
// wrapped image puller.
imagePuller images.ImageManager
// Root of the Docker runtime.
dockerRoot string
// cgroup driver used by Docker runtime.
cgroupDriver string
@ -238,10 +237,6 @@ func NewDockerManager(
// Wrap the docker client with instrumentedDockerInterface
client = NewInstrumentedDockerInterface(client)
// Work out the location of the Docker runtime, defaulting to /var/lib/docker
// if there are any problems.
dockerRoot := "/var/lib/docker"
// cgroup driver is only detectable in docker 1.11+
// when the execution driver is not detectable, we provide the cgroupfs form.
// if your docker engine is configured to use the systemd cgroup driver, and you
@ -252,11 +247,7 @@ func NewDockerManager(
dockerInfo, err := client.Info()
if err != nil {
glog.Errorf("Failed to execute Info() call to the Docker client: %v", err)
glog.Warningf("Using fallback default of /var/lib/docker for location of Docker runtime")
} else {
dockerRoot = dockerInfo.DockerRootDir
glog.Infof("Setting dockerRoot to %s", dockerRoot)
cgroupDriver = dockerInfo.CgroupDriver
glog.Infof("Setting cgroupDriver to %s", cgroupDriver)
}
@ -269,7 +260,6 @@ func NewDockerManager(
machineInfo: machineInfo,
podInfraContainerImage: podInfraContainerImage,
dockerPuller: newDockerPuller(client),
dockerRoot: dockerRoot,
cgroupDriver: cgroupDriver,
containerLogsDir: containerLogsDir,
networkPlugin: networkPlugin,
@ -482,19 +472,12 @@ func (dm *DockerManager) inspectContainer(id string, podName, podNamespace strin
startedAt = createdAt
}
terminationMessagePath := containerInfo.TerminationMessagePath
if terminationMessagePath != "" {
for _, mount := range iResult.Mounts {
if mount.Destination == terminationMessagePath {
path := mount.Source
if data, err := ioutil.ReadFile(path); err != nil {
message = fmt.Sprintf("Error on reading termination-log %s: %v", path, err)
} else {
message = string(data)
}
}
}
// retrieve the termination message from the termination-log file when one is mounted, falling back to the container logs when the policy allows it and the container failed
fallbackToLogs := containerInfo.TerminationMessagePolicy == v1.TerminationMessageFallbackToLogsOnError && (iResult.State.ExitCode != 0 || iResult.State.OOMKilled)
if msg := getTerminationMessage(dm.c, iResult, containerInfo.TerminationMessagePath, fallbackToLogs); len(msg) > 0 {
message = msg
}
status.State = kubecontainer.ContainerStateExited
status.Message = message
status.Reason = reason
@ -508,6 +491,49 @@ func (dm *DockerManager) inspectContainer(id string, podName, podNamespace strin
return &status, "", nil
}
func getTerminationMessage(c DockerInterface, iResult *dockertypes.ContainerJSON, terminationMessagePath string, fallbackToLogs bool) string {
if len(terminationMessagePath) != 0 {
for _, mount := range iResult.Mounts {
if mount.Destination != terminationMessagePath {
continue
}
path := mount.Source
data, _, err := tail.ReadAtMost(path, kubecontainer.MaxContainerTerminationMessageLength)
if err != nil {
return fmt.Sprintf("Error on reading termination log %s: %v", path, err)
}
if !fallbackToLogs || len(data) != 0 {
return string(data)
}
}
}
if !fallbackToLogs {
return ""
}
return readLastStringFromContainerLogs(c, iResult.Name)
}
// readLastStringFromContainerLogs attempts to read a certain amount from the end of the logs for containerName.
// It will attempt to avoid reading excessive logs from the server, which may result in underestimating the amount
// of logs to fetch (such that the length of the response message is < max).
func readLastStringFromContainerLogs(c DockerInterface, containerName string) string {
logOptions := dockertypes.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
}
buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
streamOptions := StreamOptions{
ErrorStream: buf,
OutputStream: buf,
}
logOptions.Tail = strconv.FormatInt(kubecontainer.MaxContainerTerminationMessageLogLines, 10)
if err := c.Logs(containerName, logOptions, streamOptions); err != nil {
return fmt.Sprintf("Error on reading termination message from logs: %v", err)
}
return buf.String()
}
// makeEnvList converts EnvVar list to a list of strings, in the form of
// '<key>=<value>', which can be understood by docker.
func makeEnvList(envs []kubecontainer.EnvVar) (result []string) {
@ -672,17 +698,24 @@ func (dm *DockerManager) runContainer(
fs, err := os.Create(containerLogPath)
if err != nil {
// TODO: Clean up the previously created dir? return the error?
glog.Errorf("Error on creating termination-log file %q: %v", containerLogPath, err)
utilruntime.HandleError(fmt.Errorf("error creating termination-log file %q: %v", containerLogPath, err))
} else {
fs.Close() // Close immediately; we're just doing a `touch` here
b := fmt.Sprintf("%s:%s", containerLogPath, container.TerminationMessagePath)
// Chmod is needed because ioutil.WriteFile() ends up calling
// open(2) to create the file, so the final mode used is "mode &
// ~umask". But we want to make sure the specified mode is used
// in the file no matter what the umask is.
if err := os.Chmod(containerLogPath, 0666); err != nil {
utilruntime.HandleError(fmt.Errorf("unable to set termination-log file permissions %q: %v", containerLogPath, err))
}
// Have docker relabel the termination log path if SELinux is
// enabled.
b := fmt.Sprintf("%s:%s", containerLogPath, container.TerminationMessagePath)
if selinux.SELinuxEnabled() {
b += ":Z"
}
binds = append(binds, b)
}
}
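The getTerminationMessage/readLastStringFromContainerLogs pair added above implements the terminationMessagePolicy behavior: when the policy is FallbackToLogsOnError and the container exited non-zero or was OOM-killed, an empty termination-log file is backfilled from the tail of the container logs (bounded by MaxContainerTerminationMessageLogLines). A minimal sketch of a container spec that opts into the fallback; the field values are only illustrative:

package dockertools

import "k8s.io/kubernetes/pkg/api/v1"

// fallbackContainer is a hypothetical spec: with this policy, a failed
// container whose termination-log file is empty gets its status message
// from the last lines of its logs instead.
func fallbackContainer() v1.Container {
    return v1.Container{
        Name:                     "app",
        Image:                    "busybox",
        TerminationMessagePath:   v1.TerminationMessagePathDefault,
        TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
    }
}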

View file

@ -27,6 +27,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -282,7 +283,7 @@ func TestCreateAppArmorContanier(t *testing.T) {
dm.recorder = recorder
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
@ -417,7 +418,7 @@ func TestGetPodStatusFromNetworkPlugin(t *testing.T) {
}{
{
pod: &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
@ -435,7 +436,7 @@ func TestGetPodStatusFromNetworkPlugin(t *testing.T) {
},
{
pod: &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",

View file

@ -17,6 +17,7 @@ limitations under the License.
package dockertools
import (
"flag"
"fmt"
"io/ioutil"
"net"
@ -36,9 +37,12 @@ import (
"github.com/golang/mock/gomock"
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
kubetypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/clock"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
@ -52,12 +56,25 @@ import (
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/util/clock"
uexec "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/intstr"
)
var testTempDir string
func TestMain(m *testing.M) {
dir, err := ioutil.TempDir("", "dockertools")
if err != nil {
panic(err)
}
testTempDir = dir
flag.Parse()
status := m.Run()
os.RemoveAll(testTempDir)
os.Exit(status)
}
type fakeHTTP struct {
url string
err error
@ -80,7 +97,7 @@ func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *
var opts kubecontainer.RunContainerOptions
var err error
if len(container.TerminationMessagePath) != 0 {
testPodContainerDir, err = ioutil.TempDir("", "fooPodContainerDir")
testPodContainerDir, err = ioutil.TempDir(testTempDir, "fooPodContainerDir")
if err != nil {
return nil, err
}
@ -164,7 +181,7 @@ func newTestDockerManagerWithHTTPClient(fakeHTTPClient *fakeHTTP) (*DockerManage
}
func newTestDockerManagerWithVersion(version, apiVersion string) (*DockerManager, *FakeDockerClient) {
fakeDocker := NewFakeDockerClientWithVersion(version, apiVersion)
fakeDocker := NewFakeDockerClient().WithVersion(version, apiVersion)
return createTestDockerManagerWithFakeImageManager(nil, fakeDocker)
}
@ -1918,7 +1935,7 @@ func makePod(name string, spec *v1.PodSpec) *v1.Pod {
spec = &v1.PodSpec{Containers: []v1.Container{{Name: "foo"}, {Name: "bar"}}}
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: name,
Namespace: "new",

View file

@ -53,6 +53,7 @@ func (*NsenterExecHandler) ExecInContainer(client DockerInterface, container *do
args = append(args, container.Config.Env...)
args = append(args, cmd...)
command := exec.Command(nsenter, args...)
var cmdErr error
if tty {
p, err := kubecontainer.StartPty(command)
if err != nil {
@ -75,7 +76,7 @@ func (*NsenterExecHandler) ExecInContainer(client DockerInterface, container *do
go io.Copy(stdout, p)
}
err = command.Wait()
cmdErr = command.Wait()
} else {
if stdin != nil {
// Use an os.Pipe here as it returns true *os.File objects.
@ -97,13 +98,13 @@ func (*NsenterExecHandler) ExecInContainer(client DockerInterface, container *do
command.Stderr = stderr
}
err = command.Run()
cmdErr = command.Run()
}
if exitErr, ok := err.(*exec.ExitError); ok {
if exitErr, ok := cmdErr.(*exec.ExitError); ok {
return &utilexec.ExitErrorWrapper{ExitError: exitErr}
}
return err
return cmdErr
}
// NativeExecHandler executes commands in Docker containers using Docker's exec API.

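The err-to-cmdErr change above matters because the tty branch declares its own err with := (p, err := kubecontainer.StartPty(command)), so assignments to err inside that block never reached the function-level *exec.ExitError check. A minimal self-contained sketch of the corrected pattern; the function and names are illustrative, not the kubelet's:

package main

import (
    "fmt"
    "os/exec"
)

// runAndClassify mirrors the corrected structure: the command's result is
// collected in a function-scoped cmdErr in both branches, so the ExitError
// type assertion always inspects the real outcome.
func runAndClassify(command *exec.Cmd, tty bool) error {
    var cmdErr error
    if tty {
        // In the kubelet this branch wires up a pty before waiting.
        if err := command.Start(); err != nil {
            return err
        }
        cmdErr = command.Wait()
    } else {
        cmdErr = command.Run()
    }
    if exitErr, ok := cmdErr.(*exec.ExitError); ok {
        return fmt.Errorf("command exited with error: %v", exitErr)
    }
    return cmdErr
}

func main() {
    fmt.Println(runAndClassify(exec.Command("sh", "-c", "exit 3"), false))
}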
View file

@ -28,7 +28,7 @@ import (
dockertypes "github.com/docker/engine-api/types"
dockercontainer "github.com/docker/engine-api/types/container"
"k8s.io/kubernetes/pkg/util/clock"
"k8s.io/client-go/util/clock"
"k8s.io/kubernetes/pkg/api/v1"
)
@ -55,8 +55,9 @@ type FakeDockerClient struct {
Errors map[string]error
called []calledDetail
pulled []string
EnableTrace bool
// Created, Stopped and Removed all container docker ID
// Created, Started, Stopped and Removed all contain container docker ID
Created []string
Started []string
Stopped []string
@ -74,25 +75,64 @@ type FakeDockerClient struct {
const fakeDockerVersion = "1.8.1"
func NewFakeDockerClient() *FakeDockerClient {
return NewFakeDockerClientWithVersion(fakeDockerVersion, minimumDockerAPIVersion)
}
func NewFakeDockerClientWithClock(c clock.Clock) *FakeDockerClient {
return newClientWithVersionAndClock(fakeDockerVersion, minimumDockerAPIVersion, c)
}
func NewFakeDockerClientWithVersion(version, apiVersion string) *FakeDockerClient {
return newClientWithVersionAndClock(version, apiVersion, clock.RealClock{})
}
func newClientWithVersionAndClock(version, apiVersion string, c clock.Clock) *FakeDockerClient {
return &FakeDockerClient{
VersionInfo: dockertypes.Version{Version: version, APIVersion: apiVersion},
VersionInfo: dockertypes.Version{Version: fakeDockerVersion, APIVersion: minimumDockerAPIVersion},
Errors: make(map[string]error),
ContainerMap: make(map[string]*dockertypes.ContainerJSON),
Clock: c,
Clock: clock.RealClock{},
// default this to an empty result, so that we never have a nil non-error response from InspectImage
Image: &dockertypes.ImageInspect{},
// default this to true, so that we trace calls, image pulls and container lifecycle
EnableTrace: true,
}
}
func (f *FakeDockerClient) WithClock(c clock.Clock) *FakeDockerClient {
f.Lock()
defer f.Unlock()
f.Clock = c
return f
}
func (f *FakeDockerClient) WithVersion(version, apiVersion string) *FakeDockerClient {
f.Lock()
defer f.Unlock()
f.VersionInfo = dockertypes.Version{Version: version, APIVersion: apiVersion}
return f
}
func (f *FakeDockerClient) WithTraceDisabled() *FakeDockerClient {
f.Lock()
defer f.Unlock()
f.EnableTrace = false
return f
}
func (f *FakeDockerClient) appendCalled(callDetail calledDetail) {
if f.EnableTrace {
f.called = append(f.called, callDetail)
}
}
func (f *FakeDockerClient) appendPulled(pull string) {
if f.EnableTrace {
f.pulled = append(f.pulled, pull)
}
}
func (f *FakeDockerClient) appendContainerTrace(traceCategory string, containerName string) {
if !f.EnableTrace {
return
}
switch traceCategory {
case "Created":
f.Created = append(f.Created, containerName)
case "Started":
f.Started = append(f.Started, containerName)
case "Stopped":
f.Stopped = append(f.Stopped, containerName)
case "Removed":
f.Removed = append(f.Removed, containerName)
}
}
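With the specialized constructors replaced by chainable setters, callers start from NewFakeDockerClient and chain only the options they need, as newTestDockerManagerWithVersion and the images_test changes below now do. A minimal sketch; the version strings and fake time are arbitrary:

package dockertools

import (
    "time"

    "k8s.io/client-go/util/clock"
)

// newConfiguredFake shows the fluent construction style: NewFakeDockerClient
// plus optional WithVersion/WithClock/WithTraceDisabled.
func newConfiguredFake() *FakeDockerClient {
    return NewFakeDockerClient().
        WithVersion("1.11.2", "1.23").
        WithClock(clock.NewFakeClock(time.Now())).
        WithTraceDisabled()
}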
@ -120,9 +160,10 @@ func (f *FakeDockerClient) ClearCalls() {
f.Lock()
defer f.Unlock()
f.called = []calledDetail{}
f.Stopped = []string{}
f.pulled = []string{}
f.Created = []string{}
f.Started = []string{}
f.Stopped = []string{}
f.Removed = []string{}
}
@ -270,6 +311,17 @@ func (f *FakeDockerClient) AssertStopped(stopped []string) error {
return nil
}
func (f *FakeDockerClient) AssertRemoved(removed []string) error {
f.Lock()
defer f.Unlock()
sort.StringSlice(removed).Sort()
sort.StringSlice(f.Removed).Sort()
if !reflect.DeepEqual(removed, f.Removed) {
return fmt.Errorf("expected %#v, got %#v", removed, f.Removed)
}
return nil
}
func (f *FakeDockerClient) popError(op string) error {
if f.Errors == nil {
return nil
@ -288,7 +340,7 @@ func (f *FakeDockerClient) popError(op string) error {
func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "list"})
f.appendCalled(calledDetail{name: "list"})
err := f.popError("list")
containerList := append([]dockertypes.Container{}, f.RunningContainerList...)
if options.All {
@ -305,7 +357,7 @@ func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptio
func (f *FakeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJSON, error) {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "inspect_container"})
f.appendCalled(calledDetail{name: "inspect_container"})
err := f.popError("inspect_container")
if container, ok := f.ContainerMap[id]; ok {
return container, err
@ -322,7 +374,7 @@ func (f *FakeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJS
func (f *FakeDockerClient) InspectImageByRef(name string) (*dockertypes.ImageInspect, error) {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "inspect_image"})
f.appendCalled(calledDetail{name: "inspect_image"})
err := f.popError("inspect_image")
return f.Image, err
}
@ -332,7 +384,7 @@ func (f *FakeDockerClient) InspectImageByRef(name string) (*dockertypes.ImageIns
func (f *FakeDockerClient) InspectImageByID(name string) (*dockertypes.ImageInspect, error) {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "inspect_image"})
f.appendCalled(calledDetail{name: "inspect_image"})
err := f.popError("inspect_image")
return f.Image, err
}
@ -356,7 +408,7 @@ func (f *FakeDockerClient) normalSleep(mean, stdDev, cutOffMillis int) {
func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "create"})
f.appendCalled(calledDetail{name: "create"})
if err := f.popError("create"); err != nil {
return nil, err
}
@ -364,7 +416,7 @@ func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig)
// Docker likes to add a '/', so copy that behavior.
name := "/" + c.Name
id := name
f.Created = append(f.Created, name)
f.appendContainerTrace("Created", name)
// The newest container should be in front, because we assume so in GetPodStatus()
f.RunningContainerList = append([]dockertypes.Container{
{ID: name, Names: []string{name}, Image: c.Config.Image, Labels: c.Config.Labels},
@ -380,11 +432,11 @@ func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig)
func (f *FakeDockerClient) StartContainer(id string) error {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "start"})
f.appendCalled(calledDetail{name: "start"})
if err := f.popError("start"); err != nil {
return err
}
f.Started = append(f.Started, id)
f.appendContainerTrace("Started", id)
container, ok := f.ContainerMap[id]
if !ok {
container = convertFakeContainer(&FakeContainer{ID: id, Name: id})
@ -404,11 +456,11 @@ func (f *FakeDockerClient) StartContainer(id string) error {
func (f *FakeDockerClient) StopContainer(id string, timeout int) error {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "stop"})
f.appendCalled(calledDetail{name: "stop"})
if err := f.popError("stop"); err != nil {
return err
}
f.Stopped = append(f.Stopped, id)
f.appendContainerTrace("Stopped", id)
// Container status should be updated before the container is moved to ExitedContainerList
f.updateContainerStatus(id, statusExitedPrefix)
var newList []dockertypes.Container
@ -442,7 +494,7 @@ func (f *FakeDockerClient) StopContainer(id string, timeout int) error {
func (f *FakeDockerClient) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "remove"})
f.appendCalled(calledDetail{name: "remove"})
err := f.popError("remove")
if err != nil {
return err
@ -451,7 +503,7 @@ func (f *FakeDockerClient) RemoveContainer(id string, opts dockertypes.Container
if f.ExitedContainerList[i].ID == id {
delete(f.ContainerMap, id)
f.ExitedContainerList = append(f.ExitedContainerList[:i], f.ExitedContainerList[i+1:]...)
f.Removed = append(f.Removed, id)
f.appendContainerTrace("Removed", id)
return nil
}
@ -465,7 +517,7 @@ func (f *FakeDockerClient) RemoveContainer(id string, opts dockertypes.Container
func (f *FakeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "logs"})
f.appendCalled(calledDetail{name: "logs"})
return f.popError("logs")
}
@ -474,7 +526,7 @@ func (f *FakeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions
func (f *FakeDockerClient) PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "pull"})
f.appendCalled(calledDetail{name: "pull"})
err := f.popError("pull")
if err == nil {
authJson, _ := json.Marshal(auth)
@ -482,7 +534,7 @@ func (f *FakeDockerClient) PullImage(image string, auth dockertypes.AuthConfig,
ID: image,
RepoTags: []string{image},
}
f.pulled = append(f.pulled, fmt.Sprintf("%s using %s", image, string(authJson)))
f.appendPulled(fmt.Sprintf("%s using %s", image, string(authJson)))
}
return err
}
@ -501,21 +553,21 @@ func (f *FakeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*
f.Lock()
defer f.Unlock()
f.execCmd = opts.Cmd
f.called = append(f.called, calledDetail{name: "create_exec"})
f.appendCalled(calledDetail{name: "create_exec"})
return &dockertypes.ContainerExecCreateResponse{ID: "12345678"}, nil
}
func (f *FakeDockerClient) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "start_exec"})
f.appendCalled(calledDetail{name: "start_exec"})
return nil
}
func (f *FakeDockerClient) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "attach"})
f.appendCalled(calledDetail{name: "attach"})
return nil
}
@ -524,13 +576,13 @@ func (f *FakeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecIns
}
func (f *FakeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) {
f.called = append(f.called, calledDetail{name: "list_images"})
f.appendCalled(calledDetail{name: "list_images"})
err := f.popError("list_images")
return f.Images, err
}
func (f *FakeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) {
f.called = append(f.called, calledDetail{name: "remove_image", arguments: []interface{}{image, opts}})
f.appendCalled(calledDetail{name: "remove_image", arguments: []interface{}{image, opts}})
err := f.popError("remove_image")
if err == nil {
for i := range f.Images {
@ -560,14 +612,14 @@ func (f *FakeDockerClient) updateContainerStatus(id, status string) {
func (f *FakeDockerClient) ResizeExecTTY(id string, height, width int) error {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "resize_exec"})
f.appendCalled(calledDetail{name: "resize_exec"})
return nil
}
func (f *FakeDockerClient) ResizeContainerTTY(id string, height, width int) error {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "resize_container"})
f.appendCalled(calledDetail{name: "resize_container"})
return nil
}
@ -612,7 +664,7 @@ func (f *FakeDockerPuller) GetImageRef(name string) (string, error) {
func (f *FakeDockerClient) ImageHistory(id string) ([]dockertypes.ImageHistory, error) {
f.Lock()
defer f.Unlock()
f.called = append(f.called, calledDetail{name: "image_history"})
f.appendCalled(calledDetail{name: "image_history"})
history := f.ImageHistoryMap[id]
return history, nil
}

View file

@ -19,6 +19,7 @@ package dockertools
import (
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -26,7 +27,6 @@ import (
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/cache"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"
)

View file

@ -24,7 +24,7 @@ import (
)
func TestImageStatsNoImages(t *testing.T) {
fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2")
fakeDockerClient := NewFakeDockerClient().WithVersion("1.2.3", "1.2")
isp := newImageStatsProvider(fakeDockerClient)
st, err := isp.ImageStats()
as := assert.New(t)
@ -34,7 +34,7 @@ func TestImageStatsNoImages(t *testing.T) {
}
func TestImageStatsWithImages(t *testing.T) {
fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2")
fakeDockerClient := NewFakeDockerClient().WithVersion("1.2.3", "1.2")
fakeHistoryData := map[string][]dockertypes.ImageHistory{
"busybox": {
{
@ -317,7 +317,7 @@ func TestImageStatsWithCachedImages(t *testing.T) {
expectedTotalStorageSize: 600,
},
} {
fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2")
fakeDockerClient := NewFakeDockerClient().WithVersion("1.2.3", "1.2")
fakeDockerClient.InjectImages(test.images)
fakeDockerClient.InjectImageHistory(test.history)
isp := newImageStatsProvider(fakeDockerClient)

View file

@ -39,11 +39,12 @@ const (
kubernetesPodDeletionGracePeriodLabel = "io.kubernetes.pod.deletionGracePeriod"
kubernetesPodTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
kubernetesContainerHashLabel = "io.kubernetes.container.hash"
kubernetesContainerRestartCountLabel = "io.kubernetes.container.restartCount"
kubernetesContainerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath"
kubernetesContainerPreStopHandlerLabel = "io.kubernetes.container.preStopHandler"
kubernetesContainerPortsLabel = "io.kubernetes.container.ports" // Added in 1.4
kubernetesContainerHashLabel = "io.kubernetes.container.hash"
kubernetesContainerRestartCountLabel = "io.kubernetes.container.restartCount"
kubernetesContainerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath"
kubernetesContainerTerminationMessagePolicyLabel = "io.kubernetes.container.terminationMessagePolicy"
kubernetesContainerPreStopHandlerLabel = "io.kubernetes.container.preStopHandler"
kubernetesContainerPortsLabel = "io.kubernetes.container.ports" // Added in 1.4
// TODO(random-liu): Keep this for old containers, remove this when we drop support for v1.1.
kubernetesPodLabel = "io.kubernetes.pod.data"
@ -63,6 +64,7 @@ type labelledContainerInfo struct {
Hash string
RestartCount int
TerminationMessagePath string
TerminationMessagePolicy v1.TerminationMessagePolicy
PreStopHandler *v1.Handler
Ports []v1.ContainerPort
}
@ -83,6 +85,7 @@ func newLabels(container *v1.Container, pod *v1.Pod, restartCount int, enableCus
labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount)
labels[kubernetesContainerTerminationMessagePathLabel] = container.TerminationMessagePath
labels[kubernetesContainerTerminationMessagePolicyLabel] = string(container.TerminationMessagePolicy)
if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
// Using json encoding so that the PreStop handler object is readable after writing as a label
rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
@ -118,7 +121,8 @@ func getContainerInfoFromLabel(labels map[string]string) *labelledContainerInfo
PodUID: kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)),
Name: getStringValueFromLabel(labels, types.KubernetesContainerNameLabel),
Hash: getStringValueFromLabel(labels, kubernetesContainerHashLabel),
TerminationMessagePath: getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePathLabel),
TerminationMessagePath: getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePathLabel),
TerminationMessagePolicy: v1.TerminationMessagePolicy(getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePolicyLabel)),
}
if containerInfo.RestartCount, err = getIntValueFromLabel(labels, kubernetesContainerRestartCountLabel); err != nil {
logError(containerInfo, kubernetesContainerRestartCountLabel, err)

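The new terminationMessagePolicy label is written by newLabels and parsed back by getContainerInfoFromLabel, so the policy survives kubelet restarts alongside the existing termination-message path. A minimal sketch of that round-trip using the functions shown above; the wrapper name is hypothetical:

package dockertools

import "k8s.io/kubernetes/pkg/api/v1"

// policySurvivesLabels checks that the terminationMessagePolicy written into
// the container labels is recovered unchanged when they are parsed back.
func policySurvivesLabels(container *v1.Container, pod *v1.Pod) bool {
    labels := newLabels(container, pod, 0, false)
    info := getContainerInfoFromLabel(labels)
    return info.TerminationMessagePolicy == container.TerminationMessagePolicy
}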
View file

@ -21,6 +21,7 @@ import (
"strconv"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
@ -71,7 +72,7 @@ func TestLabels(t *testing.T) {
Lifecycle: lifecycle,
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",